/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <net/busy_poll.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include "mlx4_en.h"

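/* Allocate a page run for Rx, starting at MLX4_EN_ALLOC_PREFER_ORDER and
 * falling back to smaller orders, then DMA-map it. The page refcount is
 * pre-charged with one reference per fragment stride the run can hold, so
 * the fast path can hand out fragments without a get_page() each time.
 */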
static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
			    struct mlx4_en_rx_alloc *page_alloc,
			    const struct mlx4_en_frag_info *frag_info,
			    gfp_t _gfp)
{
	int order;
	struct page *page;
	dma_addr_t dma;

	for (order = MLX4_EN_ALLOC_PREFER_ORDER; ;) {
		gfp_t gfp = _gfp;

		if (order)
			gfp |= __GFP_COMP | __GFP_NOWARN;
		page = alloc_pages(gfp, order);
		if (likely(page))
			break;
		if (--order < 0 ||
		    ((PAGE_SIZE << order) < frag_info->frag_size))
			return -ENOMEM;
	}
	dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
			   PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(priv->ddev, dma)) {
		put_page(page);
		return -ENOMEM;
	}
	page_alloc->page_size = PAGE_SIZE << order;
	page_alloc->page = page;
	page_alloc->dma = dma;
	page_alloc->page_offset = frag_info->frag_align;
	/* Not doing get_page() for each frag is a big win
	 * on asymmetric workloads.
	 */
	atomic_set(&page->_count,
		   page_alloc->page_size / frag_info->frag_stride);
	return 0;
}

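/* Advance each per-fragment allocator by one stride, allocating a fresh
 * page where the current one is exhausted, and write the fragment
 * addresses into the Rx descriptor. On failure, release only the pages
 * allocated by this call.
 */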
static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
			       struct mlx4_en_rx_desc *rx_desc,
			       struct mlx4_en_rx_alloc *frags,
			       struct mlx4_en_rx_alloc *ring_alloc,
			       gfp_t gfp)
{
	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
	const struct mlx4_en_frag_info *frag_info;
	struct page *page;
	dma_addr_t dma;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		frag_info = &priv->frag_info[i];
		page_alloc[i] = ring_alloc[i];
		page_alloc[i].page_offset += frag_info->frag_stride;

		if (page_alloc[i].page_offset + frag_info->frag_stride <=
		    ring_alloc[i].page_size)
			continue;

		if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
			goto out;
	}

	for (i = 0; i < priv->num_frags; i++) {
		frags[i] = ring_alloc[i];
		dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
		ring_alloc[i] = page_alloc[i];
		rx_desc->data[i].addr = cpu_to_be64(dma);
	}

	return 0;

out:
	while (i--) {
		frag_info = &priv->frag_info[i];
		if (page_alloc[i].page != ring_alloc[i].page) {
			dma_unmap_page(priv->ddev, page_alloc[i].dma,
				page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
			page = page_alloc[i].page;
			atomic_set(&page->_count, 1);
			put_page(page);
		}
	}
	return -ENOMEM;
}

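/* Release one fragment; the DMA mapping is torn down only when this
 * fragment is the last one carved out of its backing page.
 */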
static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
			      struct mlx4_en_rx_alloc *frags,
			      int i)
{
	const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
	u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;

	if (next_frag_end > frags[i].page_size)
		dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
			       PCI_DMA_FROMDEVICE);

	if (frags[i].page)
		put_page(frags[i].page);
}

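/* Allocate the initial page of every per-fragment allocator of the ring,
 * unwinding already-allocated pages on partial failure.
 */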
static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
				  struct mlx4_en_rx_ring *ring)
{
	int i;
	struct mlx4_en_rx_alloc *page_alloc;

	for (i = 0; i < priv->num_frags; i++) {
		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

		if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
				     frag_info, GFP_KERNEL))
			goto out;
	}
	return 0;

out:
	while (i--) {
		struct page *page;

		page_alloc = &ring->page_alloc[i];
		dma_unmap_page(priv->ddev, page_alloc->dma,
			       page_alloc->page_size, PCI_DMA_FROMDEVICE);
		page = page_alloc->page;
		atomic_set(&page->_count, 1);
		put_page(page);
		page_alloc->page = NULL;
	}
	return -ENOMEM;
}

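/* Unmap each allocator page and drop the references it still holds. */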
static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_rx_alloc *page_alloc;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

		page_alloc = &ring->page_alloc[i];
		en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
		       i, page_count(page_alloc->page));

		dma_unmap_page(priv->ddev, page_alloc->dma,
				page_alloc->page_size, PCI_DMA_FROMDEVICE);
		while (page_alloc->page_offset + frag_info->frag_stride <
		       page_alloc->page_size) {
			put_page(page_alloc->page);
			page_alloc->page_offset += frag_info->frag_stride;
		}
		page_alloc->page = NULL;
	}
}

static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
	int possible_frags;
	int i;

	/* Set size and memtype fields */
	for (i = 0; i < priv->num_frags; i++) {
		rx_desc->data[i].byte_count =
			cpu_to_be32(priv->frag_info[i].frag_size);
		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
	}

	/* If the number of used fragments does not fill up the ring stride,
	 * remaining (unused) fragments must be padded with null address/size
	 * and a special memory key */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = priv->num_frags; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}

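/* Locate the descriptor and fragment bookkeeping for 'index' and fill the
 * descriptor with fresh buffers.
 */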
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
				   struct mlx4_en_rx_ring *ring, int index,
				   gfp_t gfp)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
	struct mlx4_en_rx_alloc *frags = ring->rx_info +
					(index << priv->log_rx_info);

	return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
}

static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}

static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring,
				 int index)
{
	struct mlx4_en_rx_alloc *frags;
	int nr;

	frags = ring->rx_info + (index << priv->log_rx_info);
	for (nr = 0; nr < priv->num_frags; nr++) {
		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
		mlx4_en_free_frag(priv, frags, nr);
	}
}

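/* Fill all Rx rings up to the profile size. If an allocation fails once a
 * ring already holds at least MLX4_EN_MIN_RX_SIZE buffers, all rings are
 * shrunk to the largest power of two below the point of failure.
 */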
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;
	int new_size;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size,
						    GFP_KERNEL)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					en_err(priv, "Failed to allocate enough rx buffers\n");
					return -ENOMEM;
				} else {
					new_size = rounddown_pow_of_two(ring->actual_size);
					en_warn(priv, "Only %d buffers allocated, reducing ring size to %d\n",
						ring->actual_size, new_size);
					goto reduce_rings;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
		}
	}

	return 0;
}

static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	       ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
	while (ring->cons != ring->prod) {
		index = ring->cons & ring->size_mask;
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_rx_desc(priv, ring, index);
		++ring->cons;
	}
}

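/* Pick the default number of Rx rings for each Ethernet port, bounded by
 * the available completion vectors and rounded down to a power of two.
 */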
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
{
	int i;
	int num_of_eqs;
	int num_rx_rings;
	struct mlx4_dev *dev = mdev->dev;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		if (!dev->caps.comp_pool)
			num_of_eqs = max_t(int, MIN_RX_RINGS,
					   min_t(int,
						 dev->caps.num_comp_vectors,
						 DEF_RX_RINGS));
		else
			num_of_eqs = min_t(int, MAX_MSIX_P_PORT,
					   dev->caps.comp_pool/
					   dev->caps.num_ports) - 1;

		num_rx_rings = min_t(int, num_of_eqs,
				     netif_get_num_default_rss_queues());
		mdev->profile.prof[i].rx_ring_num =
			rounddown_pow_of_two(num_rx_rings);
	}
}

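/* Allocate the ring structure, the rx_info bookkeeping array and the HW
 * queue resources, preferring the given NUMA node with a fallback to any
 * node.
 */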
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, u16 stride, int node)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int err = -ENOMEM;
	int tmp;

	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed to allocate RX ring structure\n");
			return -ENOMEM;
		}
	}

	ring->prod = 0;
	ring->cons = 0;
	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
					sizeof(struct mlx4_en_rx_alloc));
	ring->rx_info = vmalloc_node(tmp, node);
	if (!ring->rx_info) {
		ring->rx_info = vmalloc(tmp);
		if (!ring->rx_info) {
			err = -ENOMEM;
			goto err_ring;
		}
	}

	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
		 ring->rx_info, tmp);

	/* Allocate HW buffers on provided NUMA node */
	set_dev_node(&mdev->dev->pdev->dev, node);
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
				 ring->buf_size, 2 * PAGE_SIZE);
	set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
	if (err)
		goto err_info;

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map RX buffer\n");
		goto err_hwq;
	}
	ring->buf = ring->wqres.buf.direct.buf;

	ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;

	*pring = ring;
	return 0;

err_hwq:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_info:
	vfree(ring->rx_info);
	ring->rx_info = NULL;
err_ring:
	kfree(ring);
	*pring = NULL;

	return err;
}

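/* Reset every Rx ring, initialize its descriptors and page allocators,
 * post the initial receive buffers and ring the producer doorbell.
 */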
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					DS_SIZE * priv->num_frags);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE)
			ring->buf += TXBB_SIZE;

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);

		/* Initialize page allocators */
		err = mlx4_en_init_allocator(priv, ring);
		if (err) {
			en_err(priv, "Failed initializing ring allocator\n");
			if (ring->stride <= TXBB_SIZE)
				ring->buf -= TXBB_SIZE;
			ring_ind--;
			goto err_allocator;
		}
	}
	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}

	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
err_allocator:
	while (ring_ind >= 0) {
		if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
			priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
		mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]);
		ring_ind--;
	}
	return err;
}

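/* Undo mlx4_en_create_rx_ring(); also drops RFS flow-steering filters when
 * CONFIG_RFS_ACCEL is set.
 */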
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring = *pring;

	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
	vfree(ring->rx_info);
	ring->rx_info = NULL;
	kfree(ring);
	*pring = NULL;
#ifdef CONFIG_RFS_ACCEL
	mlx4_en_cleanup_filters(priv);
#endif
}

void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
	mlx4_en_destroy_allocator(priv, ring);
}


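/* Move the used fragments into the skb frag list (the ring keeps the
 * refilled replacements) and trim the last fragment to the packet length.
 * Returns the number of fragments used, or 0 on failure.
 */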
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
				    struct mlx4_en_rx_desc *rx_desc,
				    struct mlx4_en_rx_alloc *frags,
				    struct sk_buff *skb,
				    int length)
{
	struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
	struct mlx4_en_frag_info *frag_info;
	int nr;
	dma_addr_t dma;

	/* Collect used fragments while replacing them in the HW descriptors */
	for (nr = 0; nr < priv->num_frags; nr++) {
		frag_info = &priv->frag_info[nr];
		if (length <= frag_info->frag_prefix_size)
			break;
		if (!frags[nr].page)
			goto fail;

		dma = be64_to_cpu(rx_desc->data[nr].addr);
		dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size,
					DMA_FROM_DEVICE);

		/* Save page reference in skb */
		__skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
		skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
		skb_frags_rx[nr].page_offset = frags[nr].page_offset;
		skb->truesize += frag_info->frag_stride;
		frags[nr].page = NULL;
	}
	/* Adjust size of last fragment to match actual length */
	if (nr > 0)
		skb_frag_size_set(&skb_frags_rx[nr - 1],
			length - priv->frag_info[nr - 1].frag_prefix_size);
	return nr;

fail:
	while (nr > 0) {
		nr--;
		__skb_frag_unref(&skb_frags_rx[nr]);
	}
	return 0;
}


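/* Build an skb for a completed descriptor: packets up to SMALL_PACKET_SIZE
 * are copied whole into the linear area, larger ones keep their page
 * fragments and only have HEADER_COPY_SIZE bytes pulled into the linear
 * part.
 */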
static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_desc *rx_desc,
				      struct mlx4_en_rx_alloc *frags,
				      unsigned int length)
{
	struct sk_buff *skb;
	void *va;
	int used_frags;
	dma_addr_t dma;

	skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
	if (!skb) {
		en_dbg(RX_ERR, priv, "Failed allocating skb\n");
		return NULL;
	}
	skb_reserve(skb, NET_IP_ALIGN);
	skb->len = length;

	/* Get pointer to first fragment so we can copy the headers into the
	 * (linear part of the) skb */
	va = page_address(frags[0].page) + frags[0].page_offset;

	if (length <= SMALL_PACKET_SIZE) {
		/* We are copying all relevant data to the skb - temporarily
		 * sync buffers for the copy */
		dma = be64_to_cpu(rx_desc->data[0].addr);
		dma_sync_single_for_cpu(priv->ddev, dma, length,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, length);
		skb->tail += length;
	} else {
		/* Move relevant fragments to skb */
		used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
							skb, length);
		if (unlikely(!used_frags)) {
			kfree_skb(skb);
			return NULL;
		}
		skb_shinfo(skb)->nr_frags = used_frags;

		/* Copy headers into the skb linear buffer */
		memcpy(skb->data, va, HEADER_COPY_SIZE);
		skb->tail += HEADER_COPY_SIZE;

		/* Skip headers in first fragment */
		skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;

		/* Adjust size of first fragment */
		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], HEADER_COPY_SIZE);
		skb->data_len = length - HEADER_COPY_SIZE;
	}
	return skb;
}

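/* Self-test helper: verify that a looped-back frame carries the expected
 * byte pattern and record the result.
 */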
static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
{
	int i;
	int offset = ETH_HLEN;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
		if (*(skb->data + offset) != (unsigned char) (i & 0xff))
			goto out_loopback;
	}
	/* Loopback found */
	priv->loopback_ok = 1;

out_loopback:
	dev_kfree_skb_any(skb);
}

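/* Repost receive buffers until the ring is full again, stopping early if
 * an atomic page allocation fails.
 */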
static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
				     struct mlx4_en_rx_ring *ring)
{
	int index = ring->prod & ring->size_mask;

	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
		if (mlx4_en_prepare_rx_desc(priv, ring, index, GFP_ATOMIC))
			break;
		ring->prod++;
		index = ring->prod & ring->size_mask;
	}
}

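/* Poll up to 'budget' completions from the Rx CQ: validate each CQE, drop
 * HW-loopbacked frames when Rx filtering is required, hand good packets to
 * GRO or the regular NAPI path, and finally repost buffers and update the
 * producer doorbell.
 */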
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
	struct mlx4_en_rx_alloc *frags;
	struct mlx4_en_rx_desc *rx_desc;
	struct sk_buff *skb;
	int index;
	int nr;
	unsigned int length;
	int polled = 0;
	int ip_summed;
	int factor = priv->cqe_factor;
	u64 timestamp;
	bool l2_tunnel;

	if (!priv->port_up)
		return 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = &cq->buf[(index << factor) + factor];

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		frags = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
			       ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
			       ((struct mlx4_err_cqe *) cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/* Check if we need to drop the packet if SRIOV is not enabled
		 * and not performing the selftest or flb disabled
		 */
		if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
			struct ethhdr *ethh;
			dma_addr_t dma;
			/* Get pointer to first fragment since we don't
			 * have an skb yet and cast it to ethhdr struct
			 */
			dma = be64_to_cpu(rx_desc->data[0].addr);
			dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
						DMA_FROM_DEVICE);
			ethh = (struct ethhdr *)(page_address(frags[0].page) +
						 frags[0].page_offset);

			if (is_multicast_ether_addr(ethh->h_dest)) {
				struct mlx4_mac_entry *entry;
				struct hlist_head *bucket;
				unsigned int mac_hash;

				/* Drop the packet, since HW loopback-ed it */
				mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				rcu_read_lock();
				hlist_for_each_entry_rcu(entry, bucket, hlist) {
					if (ether_addr_equal_64bits(entry->mac,
								    ethh->h_source)) {
						rcu_read_unlock();
						goto next;
					}
				}
				rcu_read_unlock();
			}
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		length -= ring->fcs_del;
		ring->bytes += length;
		ring->packets++;
		l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
			(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));

		if (likely(dev->features & NETIF_F_RXCSUM)) {
			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
			    (cqe->checksum == cpu_to_be16(0xffff))) {
				ring->csum_ok++;
				/* This packet is eligible for GRO if it is:
				 * - DIX Ethernet (type interpretation)
				 * - TCP/IP (v4)
				 * - without IP options
				 * - not an IP fragment
				 * - no LLS polling in progress
				 */
				if (!mlx4_en_cq_busy_polling(cq) &&
				    (dev->features & NETIF_F_GRO)) {
					struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
					if (!gro_skb)
						goto next;

					nr = mlx4_en_complete_rx_desc(priv,
						rx_desc, frags, gro_skb,
						length);
					if (!nr)
						goto next;

					skb_shinfo(gro_skb)->nr_frags = nr;
					gro_skb->len = length;
					gro_skb->data_len = length;
					gro_skb->ip_summed = CHECKSUM_UNNECESSARY;

					if (l2_tunnel)
						gro_skb->encapsulation = 1;
					if ((cqe->vlan_my_qpn &
					    cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
					    (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
						u16 vid = be16_to_cpu(cqe->sl_vid);

						__vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
					}

					if (dev->features & NETIF_F_RXHASH)
						skb_set_hash(gro_skb,
							     be32_to_cpu(cqe->immed_rss_invalid),
							     PKT_HASH_TYPE_L3);

					skb_record_rx_queue(gro_skb, cq->ring);

					if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
						timestamp = mlx4_en_get_cqe_ts(cqe);
						mlx4_en_fill_hwtstamps(mdev,
								       skb_hwtstamps(gro_skb),
								       timestamp);
					}

					napi_gro_frags(&cq->napi);
					goto next;
				}

				/* GRO not possible, complete processing here */
				ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				ip_summed = CHECKSUM_NONE;
				ring->csum_none++;
			}
		} else {
			ip_summed = CHECKSUM_NONE;
			ring->csum_none++;
		}

		skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
		if (!skb) {
			priv->stats.rx_dropped++;
			goto next;
		}

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, skb);
			goto next;
		}

		skb->ip_summed = ip_summed;
		skb->protocol = eth_type_trans(skb, dev);
		skb_record_rx_queue(skb, cq->ring);

		if (l2_tunnel)
			skb->encapsulation = 1;

		if (dev->features & NETIF_F_RXHASH)
			skb_set_hash(skb,
				     be32_to_cpu(cqe->immed_rss_invalid),
				     PKT_HASH_TYPE_L3);

		if ((be32_to_cpu(cqe->vlan_my_qpn) &
		    MLX4_CQE_VLAN_PRESENT_MASK) &&
		    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));

		if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
			timestamp = mlx4_en_get_cqe_ts(cqe);
			mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
					       timestamp);
		}

		skb_mark_napi_id(skb, &cq->napi);

		if (!mlx4_en_cq_busy_polling(cq))
			napi_gro_receive(&cq->napi, skb);
		else
			netif_receive_skb(skb);

next:
		for (nr = 0; nr < priv->num_frags; nr++)
			mlx4_en_free_frag(priv, frags, nr);

		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = &cq->buf[(index << factor) + factor];
		if (++polled == budget)
			goto out;
	}

out:
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	mlx4_en_refill_rx_buffers(priv, ring);
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}


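/* Rx completion interrupt: schedule NAPI while the port is up, otherwise
 * just re-arm the CQ.
 */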
void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (priv->port_up)
		napi_schedule(&cq->napi);
	else
		mlx4_en_arm_cq(priv, cq);
}

/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int done;

	if (!mlx4_en_cq_lock_napi(cq))
		return budget;

	done = mlx4_en_process_rx_cq(dev, cq, budget);

	mlx4_en_cq_unlock_napi(cq);

	/* If we used up all the quota - we're probably not done yet... */
	if (done == budget)
		INC_PERF_COUNTER(priv->pstats.napi_quota);
	else {
		/* Done for now */
		napi_complete(napi);
		mlx4_en_arm_cq(priv, cq);
	}
	return done;
}

static const int frag_sizes[] = {
	FRAG_SZ0,
	FRAG_SZ1,
	FRAG_SZ2,
	FRAG_SZ3
};

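/* Split the effective MTU (MTU plus Ethernet, VLAN and LLC/SNAP headroom)
 * across the predefined frag_sizes[] and record each fragment's size,
 * prefix, alignment and stride.
 */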
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE;
	int buf_size = 0;
	int i = 0;

	while (buf_size < eff_mtu) {
		priv->frag_info[i].frag_size =
			(eff_mtu > buf_size + frag_sizes[i]) ?
				frag_sizes[i] : eff_mtu - buf_size;
		priv->frag_info[i].frag_prefix_size = buf_size;
		if (!i)	{
			priv->frag_info[i].frag_align = NET_IP_ALIGN;
			priv->frag_info[i].frag_stride =
				ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
		} else {
			priv->frag_info[i].frag_align = 0;
			priv->frag_info[i].frag_stride =
				ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
		}
		buf_size += priv->frag_info[i].frag_size;
		i++;
	}

	priv->num_frags = i;
	priv->rx_skb_size = eff_mtu;
	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));

	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
	       eff_mtu, priv->num_frags);
	for (i = 0; i < priv->num_frags; i++) {
		en_err(priv,
		       "  frag:%d - size:%d prefix:%d align:%d stride:%d\n",
		       i,
		       priv->frag_info[i].frag_size,
		       priv->frag_info[i].frag_prefix_size,
		       priv->frag_info[i].frag_align,
		       priv->frag_info[i].frag_stride);
	}
}

/* RSS related functions */

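/* Allocate one Rx QP, bind it to its ring's CQ and bring it to ready
 * state; when the FW supports it, HW FCS stripping is cancelled and
 * fcs_del records the bytes to trim in software.
 */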
static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
				 struct mlx4_en_rx_ring *ring,
				 enum mlx4_qp_state *state,
				 struct mlx4_qp *qp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;
	int err = 0;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	err = mlx4_qp_alloc(mdev->dev, qpn, qp);
	if (err) {
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
		goto out;
	}
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof *context);
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
				qpn, ring->cqn, -1, context);
	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

	/* Cancel FCS removal if FW allows */
	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
		context->param3 |= cpu_to_be32(1 << 29);
		ring->fcs_del = ETH_FCS_LEN;
	} else
		ring->fcs_del = 0;

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
	if (err) {
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	}
	mlx4_en_update_rx_prod_db(ring);
out:
	kfree(context);
	return err;
}

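/* Reserve and allocate a QP that owns no receive buffers; steering rules
 * that target it effectively drop the traffic.
 */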
int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
	int err;
	u32 qpn;

	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn);
	if (err) {
		en_err(priv, "Failed reserving drop qpn\n");
		return err;
	}
	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
	if (err) {
		en_err(priv, "Failed allocating drop qp\n");
		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
		return err;
	}

	return 0;
}

void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
	u32 qpn;

	qpn = priv->drop_qp.qpn;
	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}

/* Allocate Rx QPs and configure them according to the RSS map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_rss_context *rss_context;
	int rss_rings;
	void *ptr;
	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
			MLX4_RSS_TCP_IPV6);
	int i, qpn;
	int err = 0;
	int good_qps = 0;
	static const u32 rsskey[10] = { 0xD181C62C, 0xF7F4DB5B, 0x1983A2FC,
				0x943E1ADB, 0xD9389E6B, 0xD1039C2C, 0xA74499AD,
				0x593D56D9, 0xF3253C06, 0x2ADC1FFC};

	en_dbg(DRV, priv, "Configuring rss steering\n");
	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
				    priv->rx_ring_num,
				    &rss_map->base_qpn);
	if (err) {
		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
		return err;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
	if (err) {
		en_err(priv, "Failed to allocate RSS indirection QP\n");
		goto rss_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0]->cqn, -1, &context);

	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
		rss_rings = priv->rx_ring_num;
	else
		rss_rings = priv->prof->rss_rings;

	ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
					+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
	rss_context = ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	if (priv->mdev->profile.udp_rss) {
		rss_mask |=  MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
		rss_context->base_qpn_udp = rss_context->default_qpn;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n");
		rss_mask |= MLX4_RSS_BY_INNER_HEADERS;
	}

	rss_context->flags = rss_mask;
	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
	for (i = 0; i < 10; i++)
		rss_context->rss_key[i] = cpu_to_be32(rsskey[i]);

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
	return err;
}

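/* Tear down the RSS indirection QP and all per-ring Rx QPs. */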
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);

	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}