/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <net/busy_poll.h>
#include <linux/bpf.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/irq.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif

#include "mlx4_en.h"

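/* Allocate a (possibly multi-page) buffer for rx fragments and map it for
 * DMA, falling back to lower allocation orders when memory is tight. The
 * page refcount is raised up front so that every fragment carved out of
 * the buffer holds its own reference.
 */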
static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
			    struct mlx4_en_rx_alloc *page_alloc,
			    const struct mlx4_en_frag_info *frag_info,
			    gfp_t _gfp)
{
	int order;
	struct page *page;
	dma_addr_t dma;

	for (order = frag_info->order; ;) {
		gfp_t gfp = _gfp;

		if (order)
			gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
		page = alloc_pages(gfp, order);
		if (likely(page))
			break;
		if (--order < 0 ||
		    ((PAGE_SIZE << order) < frag_info->frag_size))
			return -ENOMEM;
	}
	dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
			   frag_info->dma_dir);
	if (dma_mapping_error(priv->ddev, dma)) {
		put_page(page);
		return -ENOMEM;
	}
	page_alloc->page_size = PAGE_SIZE << order;
	page_alloc->page = page;
	page_alloc->dma = dma;
	page_alloc->page_offset = 0;
	/* Not doing get_page() for each frag is a big win
	 * on asymmetric workloads. Note we can not use atomic_set().
	 */
	page_ref_add(page, page_alloc->page_size / frag_info->frag_stride - 1);
	return 0;
}

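/* Refill one rx descriptor: advance each per-fragment allocator by one
 * stride, allocating a fresh page when the current one is exhausted, and
 * only then commit the new DMA addresses to the descriptor so that a
 * failed allocation leaves the ring allocators untouched.
 */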
static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
			       struct mlx4_en_rx_desc *rx_desc,
			       struct mlx4_en_rx_alloc *frags,
			       struct mlx4_en_rx_alloc *ring_alloc,
			       gfp_t gfp)
{
	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
	const struct mlx4_en_frag_info *frag_info;
	struct page *page;
	dma_addr_t dma;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		frag_info = &priv->frag_info[i];
		page_alloc[i] = ring_alloc[i];
		page_alloc[i].page_offset += frag_info->frag_stride;

		if (page_alloc[i].page_offset + frag_info->frag_stride <=
		    ring_alloc[i].page_size)
			continue;

		if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
			goto out;
	}

	for (i = 0; i < priv->num_frags; i++) {
		frags[i] = ring_alloc[i];
		dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
		ring_alloc[i] = page_alloc[i];
		rx_desc->data[i].addr = cpu_to_be64(dma);
	}

	return 0;

out:
	while (i--) {
		if (page_alloc[i].page != ring_alloc[i].page) {
			dma_unmap_page(priv->ddev, page_alloc[i].dma,
				page_alloc[i].page_size,
				priv->frag_info[i].dma_dir);
			page = page_alloc[i].page;
			/* Revert changes done by mlx4_alloc_pages */
			page_ref_sub(page, page_alloc[i].page_size /
					   priv->frag_info[i].frag_stride - 1);
			put_page(page);
		}
	}
	return -ENOMEM;
}

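/* Release a single rx fragment. The page is unmapped only when this
 * fragment is the last stride carved from it; the per-fragment page
 * reference is always dropped.
 */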
static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
			      struct mlx4_en_rx_alloc *frags,
			      int i)
{
	const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
	u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;


	if (next_frag_end > frags[i].page_size)
		dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
			       frag_info->dma_dir);

	if (frags[i].page)
		put_page(frags[i].page);
}

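/* Pre-allocate one mapped page run per fragment slot for this ring,
 * unwinding any partial allocations on failure.
 */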
static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
				  struct mlx4_en_rx_ring *ring)
{
	int i;
	struct mlx4_en_rx_alloc *page_alloc;

	for (i = 0; i < priv->num_frags; i++) {
		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

		if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
				     frag_info, GFP_KERNEL | __GFP_COLD))
			goto out;

		en_dbg(DRV, priv, "  frag %d allocator: - size:%d frags:%d\n",
		       i, ring->page_alloc[i].page_size,
		       page_ref_count(ring->page_alloc[i].page));
	}
	return 0;

out:
	while (i--) {
		struct page *page;

		page_alloc = &ring->page_alloc[i];
		dma_unmap_page(priv->ddev, page_alloc->dma,
			       page_alloc->page_size,
			       priv->frag_info[i].dma_dir);
		page = page_alloc->page;
		/* Revert changes done by mlx4_alloc_pages */
		page_ref_sub(page, page_alloc->page_size /
				   priv->frag_info[i].frag_stride - 1);
		put_page(page);
		page_alloc->page = NULL;
	}
	return -ENOMEM;
}

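/* Tear down the per-fragment page allocators: unmap each page run and
 * drop the references still held for the strides that were never used.
 */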
static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_rx_alloc *page_alloc;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

		page_alloc = &ring->page_alloc[i];
		en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
		       i, page_count(page_alloc->page));

		dma_unmap_page(priv->ddev, page_alloc->dma,
				page_alloc->page_size, frag_info->dma_dir);
		while (page_alloc->page_offset + frag_info->frag_stride <
		       page_alloc->page_size) {
			put_page(page_alloc->page);
			page_alloc->page_offset += frag_info->frag_stride;
		}
		page_alloc->page = NULL;
	}
}

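/* Write the static part of an rx descriptor: byte count and lkey for each
 * fragment in use, plus null padding entries for the unused slots.
 */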
static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
	int possible_frags;
	int i;

	/* Set size and memtype fields */
	for (i = 0; i < priv->num_frags; i++) {
		rx_desc->data[i].byte_count =
			cpu_to_be32(priv->frag_info[i].frag_size);
		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
	}

	/* If the number of used fragments does not fill up the ring stride,
	 * remaining (unused) fragments must be padded with null address/size
	 * and a special memory key */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = priv->num_frags; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}

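/* Post a receive buffer at the given ring index, taking it from the
 * page-per-packet recycle cache when possible and falling back to the
 * regular fragment allocators otherwise.
 */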
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
				   struct mlx4_en_rx_ring *ring, int index,
				   gfp_t gfp)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
	struct mlx4_en_rx_alloc *frags = ring->rx_info +
					(index << priv->log_rx_info);

	if (ring->page_cache.index > 0) {
		frags[0] = ring->page_cache.buf[--ring->page_cache.index];
		rx_desc->data[0].addr = cpu_to_be64(frags[0].dma);
		return 0;
	}

	return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
}

static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
{
	return ring->prod == ring->cons;
}

static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}

static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring,
				 int index)
{
	struct mlx4_en_rx_alloc *frags;
	int nr;

	frags = ring->rx_info + (index << priv->log_rx_info);
	for (nr = 0; nr < priv->num_frags; nr++) {
		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
		mlx4_en_free_frag(priv, frags, nr);
	}
}

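/* Initial fill of every rx ring. If allocations run short, all rings are
 * shrunk to the largest power of two that could actually be filled, as
 * long as that does not fall below MLX4_EN_MIN_RX_SIZE.
 */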
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;
	int new_size;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size,
						    GFP_KERNEL | __GFP_COLD)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					en_err(priv, "Failed to allocate enough rx buffers\n");
					return -ENOMEM;
				} else {
					new_size = rounddown_pow_of_two(ring->actual_size);
					en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
						ring->actual_size, new_size);
					goto reduce_rings;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
		}
	}

	return 0;
}

static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	       ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	while (!mlx4_en_is_ring_empty(ring)) {
		index = ring->cons & ring->size_mask;
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_rx_desc(priv, ring, index);
		++ring->cons;
	}
}

void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
{
	int i;
	int num_of_eqs;
	int num_rx_rings;
	struct mlx4_dev *dev = mdev->dev;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		num_of_eqs = max_t(int, MIN_RX_RINGS,
				   min_t(int,
					 mlx4_get_eqs_per_port(mdev->dev, i),
					 DEF_RX_RINGS));

		num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
			min_t(int, num_of_eqs,
			      netif_get_num_default_rss_queues());
		mdev->profile.prof[i].rx_ring_num =
			rounddown_pow_of_two(num_rx_rings);
	}
}

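/* Allocate an rx ring structure, its rx_info array and its HW queue
 * resources, preferring the requested NUMA node but falling back to any
 * node if that fails.
 */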
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, u16 stride, int node)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int err = -ENOMEM;
	int tmp;

	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed to allocate RX ring structure\n");
			return -ENOMEM;
		}
	}

	ring->prod = 0;
	ring->cons = 0;
	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
					sizeof(struct mlx4_en_rx_alloc));
	ring->rx_info = vmalloc_node(tmp, node);
	if (!ring->rx_info) {
		ring->rx_info = vmalloc(tmp);
		if (!ring->rx_info) {
			err = -ENOMEM;
			goto err_ring;
		}
	}

	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
		 ring->rx_info, tmp);

	/* Allocate HW buffers on provided NUMA node */
	set_dev_node(&mdev->dev->persist->pdev->dev, node);
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
	if (err)
		goto err_info;

	ring->buf = ring->wqres.buf.direct.buf;

	ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;

	*pring = ring;
	return 0;

err_info:
	vfree(ring->rx_info);
	ring->rx_info = NULL;
err_ring:
	kfree(ring);
	*pring = NULL;

	return err;
}

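/* Bring all rx rings to an operational state: initialize descriptors and
 * page allocators, fill the rings with buffers and publish the producer
 * index to hardware.
 */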
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					DS_SIZE * priv->num_frags);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE)
			ring->buf += TXBB_SIZE;

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);

		/* Initialize page allocators */
		err = mlx4_en_init_allocator(priv, ring);
		if (err) {
			en_err(priv, "Failed initializing ring allocator\n");
			if (ring->stride <= TXBB_SIZE)
				ring->buf -= TXBB_SIZE;
			ring_ind--;
			goto err_allocator;
		}
	}
	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}

	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
err_allocator:
	while (ring_ind >= 0) {
		if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
			priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
		mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]);
		ring_ind--;
	}
	return err;
}

/* We recover from out of memory by scheduling our napi poll
 * function (mlx4_en_process_cq), which tries to allocate
 * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
 */
void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
{
	int ring;

	if (!priv->port_up)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
			napi_reschedule(&priv->rx_cq[ring]->napi);
	}
}

/* When the rx ring is running in page-per-packet mode, a released frame can go
 * directly into a small cache, to avoid unmapping or touching the page
 * allocator. In bpf prog performance scenarios, buffers are either forwarded
 * or dropped, never converted to skbs, so every page can come directly from
 * this cache when it is sized to be a multiple of the napi budget.
 */
bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
			struct mlx4_en_rx_alloc *frame)
{
	struct mlx4_en_page_cache *cache = &ring->page_cache;

	if (cache->index >= MLX4_EN_CACHE_SIZE)
		return false;

	cache->buf[cache->index++] = *frame;
	return true;
}

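/* Free everything set up by mlx4_en_create_rx_ring(), including the
 * reference on an attached XDP program, if any.
 */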
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring = *pring;

	if (ring->xdp_prog)
		bpf_prog_put(ring->xdp_prog);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
	vfree(ring->rx_info);
	ring->rx_info = NULL;
	kfree(ring);
	*pring = NULL;
#ifdef CONFIG_RFS_ACCEL
	mlx4_en_cleanup_filters(priv);
#endif
}

void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int i;

	for (i = 0; i < ring->page_cache.index; i++) {
		struct mlx4_en_rx_alloc *frame = &ring->page_cache.buf[i];

		dma_unmap_page(priv->ddev, frame->dma, frame->page_size,
			       priv->frag_info[0].dma_dir);
		put_page(frame->page);
	}
	ring->page_cache.index = 0;
	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
	mlx4_en_destroy_allocator(priv, ring);
}


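/* Move the used fragments of one received packet into the skb frag array,
 * syncing each one for CPU access. Returns the number of fragments used,
 * or 0 on failure (all references taken so far are dropped again).
 */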
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
				    struct mlx4_en_rx_desc *rx_desc,
				    struct mlx4_en_rx_alloc *frags,
				    struct sk_buff *skb,
				    int length)
{
	struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
	struct mlx4_en_frag_info *frag_info;
	int nr;
	dma_addr_t dma;

	/* Collect used fragments while replacing them in the HW descriptors */
	for (nr = 0; nr < priv->num_frags; nr++) {
		frag_info = &priv->frag_info[nr];
		if (length <= frag_info->frag_prefix_size)
			break;
		if (!frags[nr].page)
			goto fail;

		dma = be64_to_cpu(rx_desc->data[nr].addr);
		dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size,
					DMA_FROM_DEVICE);

		/* Save page reference in skb */
		__skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
		skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
		skb_frags_rx[nr].page_offset = frags[nr].page_offset;
		skb->truesize += frag_info->frag_stride;
		frags[nr].page = NULL;
	}
	/* Adjust size of last fragment to match actual length */
	if (nr > 0)
		skb_frag_size_set(&skb_frags_rx[nr - 1],
			length - priv->frag_info[nr - 1].frag_prefix_size);
	return nr;

fail:
	while (nr > 0) {
		nr--;
		__skb_frag_unref(&skb_frags_rx[nr]);
	}
	return 0;
}


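/* Build an skb for a received packet. Short packets are copied entirely
 * into the linear part; longer packets keep their payload in page
 * fragments and only have the headers pulled into the linear part.
 */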
static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_desc *rx_desc,
				      struct mlx4_en_rx_alloc *frags,
				      unsigned int length)
{
	struct sk_buff *skb;
	void *va;
	int used_frags;
	dma_addr_t dma;

	skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
	if (!skb) {
		en_dbg(RX_ERR, priv, "Failed allocating skb\n");
		return NULL;
	}
	skb_reserve(skb, NET_IP_ALIGN);
	skb->len = length;

	/* Get pointer to first fragment so we could copy the headers into the
	 * (linear part of the) skb */
	va = page_address(frags[0].page) + frags[0].page_offset;

	if (length <= SMALL_PACKET_SIZE) {
		/* We are copying all relevant data to the skb - temporarily
		 * sync buffers for the copy */
		dma = be64_to_cpu(rx_desc->data[0].addr);
		dma_sync_single_for_cpu(priv->ddev, dma, length,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, length);
		skb->tail += length;
	} else {
		unsigned int pull_len;

		/* Move relevant fragments to skb */
		used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
							skb, length);
		if (unlikely(!used_frags)) {
			kfree_skb(skb);
			return NULL;
		}
		skb_shinfo(skb)->nr_frags = used_frags;

		pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE);
		/* Copy headers into the skb linear buffer */
		memcpy(skb->data, va, pull_len);
		skb->tail += pull_len;

		/* Skip headers in first fragment */
		skb_shinfo(skb)->frags[0].page_offset += pull_len;

		/* Adjust size of first fragment */
		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len);
		skb->data_len = length - pull_len;
	}
	return skb;
}

static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
{
	int i;
	int offset = ETH_HLEN;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
		if (*(skb->data + offset) != (unsigned char) (i & 0xff))
			goto out_loopback;
	}
	/* Loopback found */
	priv->loopback_ok = 1;

out_loopback:
	dev_kfree_skb_any(skb);
}

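/* Re-post receive buffers until the ring is full again or an atomic
 * allocation fails; called from the rx completion path.
 */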
static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
				     struct mlx4_en_rx_ring *ring)
{
	int index = ring->prod & ring->size_mask;

	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
		if (mlx4_en_prepare_rx_desc(priv, ring, index,
					    GFP_ATOMIC | __GFP_COLD))
			break;
		ring->prod++;
		index = ring->prod & ring->size_mask;
	}
}

/* When hardware doesn't strip the vlan, we need to calculate the checksum
 * over it and add it to the hardware's checksum calculation
 */
static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
					 struct vlan_hdr *vlanh)
{
	return csum_add(hw_checksum, *(__wsum *)vlanh);
}

/* Although the stack expects checksum which doesn't include the pseudo
 * header, the HW adds it. To address that, we are subtracting the pseudo
 * header checksum from the checksum value provided by the HW.
 */
static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
				struct iphdr *iph)
{
	__u16 length_for_csum = 0;
	__wsum csum_pseudo_header = 0;

	length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
	csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
						length_for_csum, iph->protocol, 0);
	skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
}

#if IS_ENABLED(CONFIG_IPV6)
/* In IPv6 packets, besides subtracting the pseudo header checksum,
 * we also compute/add the IP header checksum which
 * is not added by the HW.
 */
static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
			       struct ipv6hdr *ipv6h)
{
	__wsum csum_pseudo_hdr = 0;

	if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
		return -1;
	hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));

	csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
				       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr));

	skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
	skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
	return 0;
}
#endif
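/* Fix up the CHECKSUM_COMPLETE value reported by the HW: fold in a
 * non-stripped VLAN header when present and remove the pseudo-header
 * contribution for IPv4/IPv6. Returns non-zero when the packet cannot be
 * handled and the completed checksum must be discarded.
 */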
static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
		      netdev_features_t dev_features)
{
	__wsum hw_checksum = 0;

	void *hdr = (u8 *)va + sizeof(struct ethhdr);

	hw_checksum = csum_unfold((__force __sum16)cqe->checksum);

	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
	    !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
		hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
		hdr += sizeof(struct vlan_hdr);
	}

	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
		get_fixed_ipv4_csum(hw_checksum, skb, hdr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
		if (get_fixed_ipv6_csum(hw_checksum, skb, hdr))
			return -1;
#endif
	return 0;
}

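/* rx NAPI poll body: walk the completion queue for up to @budget packets,
 * give an attached XDP program first chance at each frame, build skbs
 * (via GRO when possible) and pass them up the stack, then refill the
 * ring and update the CQ consumer index.
 */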
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
	struct mlx4_en_rx_alloc *frags;
	struct mlx4_en_rx_desc *rx_desc;
	struct bpf_prog *xdp_prog;
	struct sk_buff *skb;
	int index;
	int nr;
	unsigned int length;
	int polled = 0;
	int ip_summed;
	int factor = priv->cqe_factor;
	u64 timestamp;
	bool l2_tunnel;

	if (!priv->port_up)
		return 0;

	if (budget <= 0)
		return polled;

	xdp_prog = READ_ONCE(ring->xdp_prog);

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		frags = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		dma_rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
			       ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
			       ((struct mlx4_err_cqe *)cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/* Check if we need to drop the packet if SRIOV is not enabled
		 * and not performing the selftest or flb disabled
		 */
		if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
			struct ethhdr *ethh;
			dma_addr_t dma;
			/* Get pointer to first fragment since we haven't
			 * skb yet and cast it to ethhdr struct
			 */
			dma = be64_to_cpu(rx_desc->data[0].addr);
			dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
						DMA_FROM_DEVICE);
			ethh = (struct ethhdr *)(page_address(frags[0].page) +
						 frags[0].page_offset);

			if (is_multicast_ether_addr(ethh->h_dest)) {
				struct mlx4_mac_entry *entry;
				struct hlist_head *bucket;
				unsigned int mac_hash;

				/* Drop the packet, since HW loopback-ed it */
				mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				rcu_read_lock();
				hlist_for_each_entry_rcu(entry, bucket, hlist) {
					if (ether_addr_equal_64bits(entry->mac,
								    ethh->h_source)) {
						rcu_read_unlock();
						goto next;
					}
				}
				rcu_read_unlock();
			}
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		length -= ring->fcs_del;
		ring->bytes += length;
		ring->packets++;
		l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
			(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));

		/* A bpf program gets first chance to drop the packet. It may
		 * read bytes but not past the end of the frag.
		 */
		if (xdp_prog) {
			struct xdp_buff xdp;
			dma_addr_t dma;
			u32 act;

			dma = be64_to_cpu(rx_desc->data[0].addr);
			dma_sync_single_for_cpu(priv->ddev, dma,
						priv->frag_info[0].frag_size,
						DMA_FROM_DEVICE);

			xdp.data = page_address(frags[0].page) +
							frags[0].page_offset;
			xdp.data_end = xdp.data + length;

			act = bpf_prog_run_xdp(xdp_prog, &xdp);
			switch (act) {
			case XDP_PASS:
				break;
			default:
				bpf_warn_invalid_xdp_action(act);
			case XDP_ABORTED:
			case XDP_DROP:
				if (mlx4_en_rx_recycle(ring, frags))
					goto consumed;
				goto next;
			}
		}

		if (likely(dev->features & NETIF_F_RXCSUM)) {
			if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
						      MLX4_CQE_STATUS_UDP)) {
				if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
				    cqe->checksum == cpu_to_be16(0xffff)) {
					ip_summed = CHECKSUM_UNNECESSARY;
					ring->csum_ok++;
				} else {
					ip_summed = CHECKSUM_NONE;
					ring->csum_none++;
				}
			} else {
				if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
				    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
							       MLX4_CQE_STATUS_IPV6))) {
					ip_summed = CHECKSUM_COMPLETE;
					ring->csum_complete++;
				} else {
					ip_summed = CHECKSUM_NONE;
					ring->csum_none++;
				}
			}
		} else {
			ip_summed = CHECKSUM_NONE;
			ring->csum_none++;
		}

		/* This packet is eligible for GRO if it is:
		 * - DIX Ethernet (type interpretation)
		 * - TCP/IP (v4)
		 * - without IP options
		 * - not an IP fragment
		 */
		if (dev->features & NETIF_F_GRO) {
			struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
			if (!gro_skb)
				goto next;

			nr = mlx4_en_complete_rx_desc(priv,
				rx_desc, frags, gro_skb,
				length);
			if (!nr)
				goto next;

			if (ip_summed == CHECKSUM_COMPLETE) {
				void *va = skb_frag_address(skb_shinfo(gro_skb)->frags);
				if (check_csum(cqe, gro_skb, va,
					       dev->features)) {
					ip_summed = CHECKSUM_NONE;
					ring->csum_none++;
					ring->csum_complete--;
				}
			}

			skb_shinfo(gro_skb)->nr_frags = nr;
			gro_skb->len = length;
			gro_skb->data_len = length;
			gro_skb->ip_summed = ip_summed;

			if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
				gro_skb->csum_level = 1;

			if ((cqe->vlan_my_qpn &
			    cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
			    (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
				u16 vid = be16_to_cpu(cqe->sl_vid);

				__vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
			} else if ((be32_to_cpu(cqe->vlan_my_qpn) &
				  MLX4_CQE_SVLAN_PRESENT_MASK) &&
				 (dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
				__vlan_hwaccel_put_tag(gro_skb,
						       htons(ETH_P_8021AD),
						       be16_to_cpu(cqe->sl_vid));
			}

			if (dev->features & NETIF_F_RXHASH)
				skb_set_hash(gro_skb,
					     be32_to_cpu(cqe->immed_rss_invalid),
					     (ip_summed == CHECKSUM_UNNECESSARY) ?
						PKT_HASH_TYPE_L4 :
						PKT_HASH_TYPE_L3);

			skb_record_rx_queue(gro_skb, cq->ring);

			if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
				timestamp = mlx4_en_get_cqe_ts(cqe);
				mlx4_en_fill_hwtstamps(mdev,
						       skb_hwtstamps(gro_skb),
						       timestamp);
			}

			napi_gro_frags(&cq->napi);
			goto next;
		}

		/* GRO not possible, complete processing here */
		skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
		if (!skb) {
			ring->dropped++;
			goto next;
		}

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, skb);
			goto next;
		}

		if (ip_summed == CHECKSUM_COMPLETE) {
			if (check_csum(cqe, skb, skb->data, dev->features)) {
				ip_summed = CHECKSUM_NONE;
				ring->csum_complete--;
				ring->csum_none++;
			}
		}

		skb->ip_summed = ip_summed;
		skb->protocol = eth_type_trans(skb, dev);
		skb_record_rx_queue(skb, cq->ring);

		if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
			skb->csum_level = 1;

		if (dev->features & NETIF_F_RXHASH)
			skb_set_hash(skb,
				     be32_to_cpu(cqe->immed_rss_invalid),
				     (ip_summed == CHECKSUM_UNNECESSARY) ?
					PKT_HASH_TYPE_L4 :
					PKT_HASH_TYPE_L3);

		if ((be32_to_cpu(cqe->vlan_my_qpn) &
		    MLX4_CQE_CVLAN_PRESENT_MASK) &&
		    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
		else if ((be32_to_cpu(cqe->vlan_my_qpn) &
			  MLX4_CQE_SVLAN_PRESENT_MASK) &&
			 (dev->features & NETIF_F_HW_VLAN_STAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
					       be16_to_cpu(cqe->sl_vid));

		if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
			timestamp = mlx4_en_get_cqe_ts(cqe);
			mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
					       timestamp);
		}

		napi_gro_receive(&cq->napi, skb);
next:
		for (nr = 0; nr < priv->num_frags; nr++)
			mlx4_en_free_frag(priv, frags, nr);

consumed:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
		if (++polled == budget)
			goto out;
	}

out:
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	mlx4_en_refill_rx_buffers(priv, ring);
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}


void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (likely(priv->port_up))
		napi_schedule_irqoff(&cq->napi);
	else
		mlx4_en_arm_cq(priv, cq);
}

/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int done;

	done = mlx4_en_process_rx_cq(dev, cq, budget);

	/* If we used up all the quota - we're probably not done yet... */
	if (done == budget) {
		const struct cpumask *aff;
		struct irq_data *idata;
		int cpu_curr;

		INC_PERF_COUNTER(priv->pstats.napi_quota);

		cpu_curr = smp_processor_id();
		idata = irq_desc_get_irq_data(cq->irq_desc);
		aff = irq_data_get_affinity_mask(idata);

		if (likely(cpumask_test_cpu(cpu_curr, aff)))
			return budget;

		/* Current cpu is not according to smp_irq_affinity -
		 * probably affinity changed. need to stop this NAPI
		 * poll, and restart it on the right CPU
		 */
		done = 0;
	}
	/* Done for now */
	napi_complete_done(napi, done);
	mlx4_en_arm_cq(priv, cq);
	return done;
}

static const int frag_sizes[] = {
	FRAG_SZ0,
	FRAG_SZ1,
	FRAG_SZ2,
	FRAG_SZ3
};

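/* Compute the rx fragment layout for the current MTU: number of fragments
 * per packet and their sizes and strides. With an XDP program attached
 * the layout is forced to one packet per page.
 */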
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	enum dma_data_direction dma_dir = PCI_DMA_FROMDEVICE;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu);
	int order = MLX4_EN_ALLOC_PREFER_ORDER;
	u32 align = SMP_CACHE_BYTES;
	int buf_size = 0;
	int i = 0;

	/* bpf requires buffers to be set up as 1 packet per page.
	 * This only works when num_frags == 1.
	 */
	if (priv->xdp_ring_num) {
		/* This will gain efficient xdp frame recycling at the expense
		 * of more costly truesize accounting
		 */
		align = PAGE_SIZE;
		order = 0;
	}

	while (buf_size < eff_mtu) {
		priv->frag_info[i].order = order;
		priv->frag_info[i].frag_size =
			(eff_mtu > buf_size + frag_sizes[i]) ?
				frag_sizes[i] : eff_mtu - buf_size;
		priv->frag_info[i].frag_prefix_size = buf_size;
		priv->frag_info[i].frag_stride =
				ALIGN(priv->frag_info[i].frag_size, align);
		priv->frag_info[i].dma_dir = dma_dir;
		buf_size += priv->frag_info[i].frag_size;
		i++;
	}

	priv->num_frags = i;
	priv->rx_skb_size = eff_mtu;
	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));

	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
	       eff_mtu, priv->num_frags);
	for (i = 0; i < priv->num_frags; i++) {
		en_err(priv,
		       "  frag:%d - size:%d prefix:%d stride:%d\n",
		       i,
		       priv->frag_info[i].frag_size,
		       priv->frag_info[i].frag_prefix_size,
		       priv->frag_info[i].frag_stride);
	}
}

/* RSS related functions */

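/* Allocate one rx QP, bind it to its ring's CQ and bring it to the ready
 * state; this is also where FCS stripping is kept or cancelled per ring.
 */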
static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
				 struct mlx4_en_rx_ring *ring,
				 enum mlx4_qp_state *state,
				 struct mlx4_qp *qp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;
	int err = 0;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
		goto out;
	}
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof *context);
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
				qpn, ring->cqn, -1, context);
	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

	/* Cancel FCS removal if FW allows */
	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
		context->param3 |= cpu_to_be32(1 << 29);
		if (priv->dev->features & NETIF_F_RXFCS)
			ring->fcs_del = 0;
		else
			ring->fcs_del = ETH_FCS_LEN;
	} else
		ring->fcs_del = 0;

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
	if (err) {
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	}
	mlx4_en_update_rx_prod_db(ring);
out:
	kfree(context);
	return err;
}

int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
	int err;
	u32 qpn;

	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn,
				    MLX4_RESERVE_A0_QP);
	if (err) {
		en_err(priv, "Failed reserving drop qpn\n");
		return err;
	}
	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed allocating drop qp\n");
		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
		return err;
	}

	return 0;
}

void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
	u32 qpn;

	qpn = priv->drop_qp.qpn;
	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}

/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_rss_context *rss_context;
	int rss_rings;
	void *ptr;
	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
			MLX4_RSS_TCP_IPV6);
	int i, qpn;
	int err = 0;
	int good_qps = 0;

	en_dbg(DRV, priv, "Configuring rss steering\n");
	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
				    priv->rx_ring_num,
				    &rss_map->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
		return err;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate RSS indirection QP\n");
		goto rss_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0]->cqn, -1, &context);

	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
		rss_rings = priv->rx_ring_num;
	else
		rss_rings = priv->prof->rss_rings;

	ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
					+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
	rss_context = ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	if (priv->mdev->profile.udp_rss) {
		rss_mask |=  MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
		rss_context->base_qpn_udp = rss_context->default_qpn;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n");
		rss_mask |= MLX4_RSS_BY_INNER_HEADERS;
	}

	rss_context->flags = rss_mask;
	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
	if (priv->rss_hash_fn == ETH_RSS_HASH_XOR) {
		rss_context->hash_fn = MLX4_RSS_HASH_XOR;
	} else if (priv->rss_hash_fn == ETH_RSS_HASH_TOP) {
		rss_context->hash_fn = MLX4_RSS_HASH_TOP;
		memcpy(rss_context->rss_key, priv->rss_key,
		       MLX4_EN_RSS_KEY_SIZE);
	} else {
		en_err(priv, "Unknown RSS hash function requested\n");
		err = -EINVAL;
		goto indir_err;
	}
	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
	return err;
}

void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);

	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}