/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <net/busy_poll.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/irq.h>

#include "mlx4_en.h"

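/* Allocate a page run for RX fragments, retrying at progressively smaller
 * orders when high-order pages are scarce.  A sketch of the refcount trick
 * below, with illustrative numbers: a 32KB run and a 2KB frag_stride host
 * 16 fragments, so _count is raised by 15 once here and each consumed
 * fragment later drops exactly one reference via put_page().
 */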
static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
			    struct mlx4_en_rx_alloc *page_alloc,
			    const struct mlx4_en_frag_info *frag_info,
			    gfp_t _gfp)
{
	int order;
	struct page *page;
	dma_addr_t dma;

	for (order = MLX4_EN_ALLOC_PREFER_ORDER; ;) {
		gfp_t gfp = _gfp;

		if (order)
			gfp |= __GFP_COMP | __GFP_NOWARN;
		page = alloc_pages(gfp, order);
		if (likely(page))
			break;
		if (--order < 0 ||
		    ((PAGE_SIZE << order) < frag_info->frag_size))
			return -ENOMEM;
	}
	dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
			   PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(priv->ddev, dma)) {
		put_page(page);
		return -ENOMEM;
	}
	page_alloc->page_size = PAGE_SIZE << order;
	page_alloc->page = page;
	page_alloc->dma = dma;
	page_alloc->page_offset = frag_info->frag_align;
	/* Not doing get_page() for each frag is a big win
	 * on asymmetric workloads. Note we cannot use atomic_set().
	 */
	atomic_add(page_alloc->page_size / frag_info->frag_stride - 1,
		   &page->_count);
	return 0;
}

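/* Refill the fragments of one RX descriptor in two passes: first advance
 * each per-frag allocator (taking new pages only where the current page is
 * exhausted), then publish the previous positions into the HW descriptor.
 * On failure the ring allocators are left untouched, so nothing leaks into
 * the descriptor.
 */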
static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
			       struct mlx4_en_rx_desc *rx_desc,
			       struct mlx4_en_rx_alloc *frags,
			       struct mlx4_en_rx_alloc *ring_alloc,
			       gfp_t gfp)
{
	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
	const struct mlx4_en_frag_info *frag_info;
	struct page *page;
	dma_addr_t dma;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		frag_info = &priv->frag_info[i];
		page_alloc[i] = ring_alloc[i];
		page_alloc[i].page_offset += frag_info->frag_stride;

		if (page_alloc[i].page_offset + frag_info->frag_stride <=
		    ring_alloc[i].page_size)
			continue;

		if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
			goto out;
	}

	for (i = 0; i < priv->num_frags; i++) {
		frags[i] = ring_alloc[i];
		dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
		ring_alloc[i] = page_alloc[i];
		rx_desc->data[i].addr = cpu_to_be64(dma);
	}

	return 0;

out:
	while (i--) {
		if (page_alloc[i].page != ring_alloc[i].page) {
			dma_unmap_page(priv->ddev, page_alloc[i].dma,
				page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
			page = page_alloc[i].page;
			atomic_set(&page->_count, 1);
			put_page(page);
		}
	}
	return -ENOMEM;
}
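
/* Release one fragment back to the page allocator.  The DMA mapping is
 * torn down only when no further fragment fits on the page, i.e. when the
 * ring is dropping its last reference to that page.
 */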

static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
			      struct mlx4_en_rx_alloc *frags,
			      int i)
{
	const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
	u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;

	if (next_frag_end > frags[i].page_size)
		dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
			       PCI_DMA_FROMDEVICE);

	if (frags[i].page)
		put_page(frags[i].page);
}

static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
				  struct mlx4_en_rx_ring *ring)
{
	int i;
	struct mlx4_en_rx_alloc *page_alloc;

	for (i = 0; i < priv->num_frags; i++) {
		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

		if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
				     frag_info, GFP_KERNEL))
			goto out;
	}
	return 0;

out:
	while (i--) {
		struct page *page;

		page_alloc = &ring->page_alloc[i];
		dma_unmap_page(priv->ddev, page_alloc->dma,
			       page_alloc->page_size, PCI_DMA_FROMDEVICE);
		page = page_alloc->page;
		atomic_set(&page->_count, 1);
		put_page(page);
		page_alloc->page = NULL;
	}
	return -ENOMEM;
}

static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_rx_alloc *page_alloc;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

		page_alloc = &ring->page_alloc[i];
		en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
		       i, page_count(page_alloc->page));

		dma_unmap_page(priv->ddev, page_alloc->dma,
				page_alloc->page_size, PCI_DMA_FROMDEVICE);
		while (page_alloc->page_offset + frag_info->frag_stride <
		       page_alloc->page_size) {
			put_page(page_alloc->page);
			page_alloc->page_offset += frag_info->frag_stride;
		}
		page_alloc->page = NULL;
	}
}

static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
	int possible_frags;
	int i;

	/* Set size and memtype fields */
	for (i = 0; i < priv->num_frags; i++) {
		rx_desc->data[i].byte_count =
			cpu_to_be32(priv->frag_info[i].frag_size);
		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
	}

	/* If the number of used fragments does not fill up the ring stride,
	 * remaining (unused) fragments must be padded with null address/size
	 * and a special memory key */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = priv->num_frags; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}
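
/* Attach fresh page fragments to the RX descriptor at 'index'; the
 * corresponding rx_info slots remember the page/dma state for later release.
 */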

static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
				   struct mlx4_en_rx_ring *ring, int index,
				   gfp_t gfp)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
	struct mlx4_en_rx_alloc *frags = ring->rx_info +
					(index << priv->log_rx_info);

	return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
}

static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}

static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring,
				 int index)
{
	struct mlx4_en_rx_alloc *frags;
	int nr;

	frags = ring->rx_info + (index << priv->log_rx_info);
	for (nr = 0; nr < priv->num_frags; nr++) {
		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
		mlx4_en_free_frag(priv, frags, nr);
	}
}

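/* Populate every ring with receive buffers.  If allocation stalls early,
 * the rings are either declared unusable (below MLX4_EN_MIN_RX_SIZE) or
 * all shrunk to the largest power of two that was fully populated.
 */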
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;
	int new_size;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size,
						    GFP_KERNEL)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					en_err(priv, "Failed to allocate enough rx buffers\n");
					return -ENOMEM;
				} else {
					new_size = rounddown_pow_of_two(ring->actual_size);
					en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
						ring->actual_size, new_size);
					goto reduce_rings;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
		}
	}

	return 0;
}

static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	       ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
	while (ring->cons != ring->prod) {
		index = ring->cons & ring->size_mask;
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_rx_desc(priv, ring, index);
		++ring->cons;
	}
}

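/* Choose a default number of RX rings per Ethernet port, bounded by the
 * available completion vectors and the kernel's default RSS queue count,
 * then rounded down to a power of two (the RSS context encodes the ring
 * count as ilog2, so other counts are not representable).
 */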
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
{
	int i;
	int num_of_eqs;
	int num_rx_rings;
	struct mlx4_dev *dev = mdev->dev;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		if (!dev->caps.comp_pool)
			num_of_eqs = max_t(int, MIN_RX_RINGS,
					   min_t(int,
						 dev->caps.num_comp_vectors,
						 DEF_RX_RINGS));
		else
			num_of_eqs = min_t(int, MAX_MSIX_P_PORT,
					   dev->caps.comp_pool/
					   dev->caps.num_ports) - 1;

		num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
			min_t(int, num_of_eqs,
			      netif_get_num_default_rss_queues());
		mdev->profile.prof[i].rx_ring_num =
			rounddown_pow_of_two(num_rx_rings);
	}
}

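/* Allocate one RX ring: the ring structure and rx_info array prefer the
 * given NUMA node (with a fallback to any node), and the HW work queue is
 * placed on that node as well by temporarily switching the device node.
 */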
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, u16 stride, int node)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int err = -ENOMEM;
	int tmp;

	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed to allocate RX ring structure\n");
			return -ENOMEM;
		}
	}

	ring->prod = 0;
	ring->cons = 0;
	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
					sizeof(struct mlx4_en_rx_alloc));
	ring->rx_info = vmalloc_node(tmp, node);
	if (!ring->rx_info) {
		ring->rx_info = vmalloc(tmp);
		if (!ring->rx_info) {
			err = -ENOMEM;
			goto err_ring;
		}
	}

	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
		 ring->rx_info, tmp);

	/* Allocate HW buffers on provided NUMA node */
	set_dev_node(&mdev->dev->pdev->dev, node);
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
				 ring->buf_size, 2 * PAGE_SIZE);
	set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
	if (err)
		goto err_info;

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map RX buffer\n");
		goto err_hwq;
	}
	ring->buf = ring->wqres.buf.direct.buf;

	ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;

	*pring = ring;
	return 0;

err_hwq:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_info:
	vfree(ring->rx_info);
	ring->rx_info = NULL;
err_ring:
	kfree(ring);
	*pring = NULL;

	return err;
}

int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					DS_SIZE * priv->num_frags);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE)
			ring->buf += TXBB_SIZE;

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);

		/* Initialize page allocators */
		err = mlx4_en_init_allocator(priv, ring);
		if (err) {
			en_err(priv, "Failed initializing ring allocator\n");
			if (ring->stride <= TXBB_SIZE)
				ring->buf -= TXBB_SIZE;
			ring_ind--;
			goto err_allocator;
		}
	}
	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}

	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
err_allocator:
	while (ring_ind >= 0) {
		if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
			priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
		mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]);
		ring_ind--;
	}
	return err;
}

void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring = *pring;

	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
	vfree(ring->rx_info);
	ring->rx_info = NULL;
	kfree(ring);
	*pring = NULL;
#ifdef CONFIG_RFS_ACCEL
	mlx4_en_cleanup_filters(priv);
#endif
}

void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
	mlx4_en_destroy_allocator(priv, ring);
}
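
/* Zero-copy completion: hand the filled page fragments over to the skb
 * and clear them from rx_info so the later per-CQE cleanup does not free
 * pages that the skb now owns.
 */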


static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
				    struct mlx4_en_rx_desc *rx_desc,
				    struct mlx4_en_rx_alloc *frags,
				    struct sk_buff *skb,
				    int length)
{
	struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
	struct mlx4_en_frag_info *frag_info;
	int nr;
	dma_addr_t dma;

	/* Collect used fragments while replacing them in the HW descriptors */
	for (nr = 0; nr < priv->num_frags; nr++) {
		frag_info = &priv->frag_info[nr];
		if (length <= frag_info->frag_prefix_size)
			break;
		if (!frags[nr].page)
			goto fail;

		dma = be64_to_cpu(rx_desc->data[nr].addr);
		dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size,
					DMA_FROM_DEVICE);

		/* Save page reference in skb */
		__skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
		skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
		skb_frags_rx[nr].page_offset = frags[nr].page_offset;
		skb->truesize += frag_info->frag_stride;
		frags[nr].page = NULL;
	}
	/* Adjust size of last fragment to match actual length */
	if (nr > 0)
		skb_frag_size_set(&skb_frags_rx[nr - 1],
			length - priv->frag_info[nr - 1].frag_prefix_size);
	return nr;

fail:
	while (nr > 0) {
		nr--;
		__skb_frag_unref(&skb_frags_rx[nr]);
	}
	return 0;
}


static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_desc *rx_desc,
				      struct mlx4_en_rx_alloc *frags,
				      unsigned int length)
{
	struct sk_buff *skb;
	void *va;
	int used_frags;
	dma_addr_t dma;

	skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
	if (!skb) {
		en_dbg(RX_ERR, priv, "Failed allocating skb\n");
		return NULL;
	}
	skb_reserve(skb, NET_IP_ALIGN);
	skb->len = length;

	/* Get a pointer to the first fragment so we can copy the headers
	 * into the (linear part of the) skb */
	va = page_address(frags[0].page) + frags[0].page_offset;

	if (length <= SMALL_PACKET_SIZE) {
		/* We are copying all relevant data to the skb - temporarily
		 * sync buffers for the copy */
		dma = be64_to_cpu(rx_desc->data[0].addr);
		dma_sync_single_for_cpu(priv->ddev, dma, length,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, length);
		skb->tail += length;
	} else {
		unsigned int pull_len;

		/* Move relevant fragments to skb */
		used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
							skb, length);
		if (unlikely(!used_frags)) {
			kfree_skb(skb);
			return NULL;
		}
		skb_shinfo(skb)->nr_frags = used_frags;

		pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE);
		/* Copy headers into the skb linear buffer */
		memcpy(skb->data, va, pull_len);
		skb->tail += pull_len;

		/* Skip headers in first fragment */
		skb_shinfo(skb)->frags[0].page_offset += pull_len;

		/* Adjust size of first fragment */
		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len);
		skb->data_len = length - pull_len;
	}
	return skb;
}

static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
{
	int i;
	int offset = ETH_HLEN;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
		if (*(skb->data + offset) != (unsigned char) (i & 0xff))
			goto out_loopback;
	}
	/* Loopback found */
	priv->loopback_ok = 1;

out_loopback:
	dev_kfree_skb_any(skb);
}

static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
				     struct mlx4_en_rx_ring *ring)
{
	int index = ring->prod & ring->size_mask;

	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
		if (mlx4_en_prepare_rx_desc(priv, ring, index, GFP_ATOMIC))
			break;
		ring->prod++;
		index = ring->prod & ring->size_mask;
	}
}

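/* Main RX completion processing.  CQE ownership alternates between HW and
 * SW on each wrap of the CQ, so a CQE belongs to SW when its ownership bit
 * matches the current wrap state of cons_index; that is the XNOR test in
 * the loop condition below.
 */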
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
	struct mlx4_en_rx_alloc *frags;
	struct mlx4_en_rx_desc *rx_desc;
	struct sk_buff *skb;
	int index;
	int nr;
	unsigned int length;
	int polled = 0;
	int ip_summed;
	int factor = priv->cqe_factor;
	u64 timestamp;
	bool l2_tunnel;

	if (!priv->port_up)
		return 0;

	if (budget <= 0)
		return polled;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		frags = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * Make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
			       ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
			       ((struct mlx4_err_cqe *)cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/* Check if we need to drop the packet, when SRIOV is not
		 * enabled and we are not performing the selftest, or when
		 * flb is disabled
		 */
		if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
			struct ethhdr *ethh;
			dma_addr_t dma;
			/* Get a pointer to the first fragment, since we don't
			 * have the skb yet, and cast it to an ethhdr struct
			 */
			dma = be64_to_cpu(rx_desc->data[0].addr);
			dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
						DMA_FROM_DEVICE);
			ethh = (struct ethhdr *)(page_address(frags[0].page) +
						 frags[0].page_offset);

			if (is_multicast_ether_addr(ethh->h_dest)) {
				struct mlx4_mac_entry *entry;
				struct hlist_head *bucket;
				unsigned int mac_hash;

				/* Drop the packet, since the HW looped it back */
				mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				rcu_read_lock();
				hlist_for_each_entry_rcu(entry, bucket, hlist) {
					if (ether_addr_equal_64bits(entry->mac,
								    ethh->h_source)) {
						rcu_read_unlock();
						goto next;
					}
				}
				rcu_read_unlock();
			}
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		length -= ring->fcs_del;
		ring->bytes += length;
		ring->packets++;
		l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
			(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));

		if (likely(dev->features & NETIF_F_RXCSUM)) {
			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
			    (cqe->checksum == cpu_to_be16(0xffff))) {
				ring->csum_ok++;
				/* This packet is eligible for GRO if it is:
				 * - DIX Ethernet (type interpretation)
				 * - TCP/IP (v4)
				 * - without IP options
				 * - not an IP fragment
				 * - no LLS polling in progress
				 */
				if (!mlx4_en_cq_busy_polling(cq) &&
				    (dev->features & NETIF_F_GRO)) {
					struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
					if (!gro_skb)
						goto next;

					nr = mlx4_en_complete_rx_desc(priv,
						rx_desc, frags, gro_skb,
						length);
					if (!nr)
						goto next;

					skb_shinfo(gro_skb)->nr_frags = nr;
					gro_skb->len = length;
					gro_skb->data_len = length;
					gro_skb->ip_summed = CHECKSUM_UNNECESSARY;

					if (l2_tunnel)
						gro_skb->csum_level = 1;
					if ((cqe->vlan_my_qpn &
					    cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
					    (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
						u16 vid = be16_to_cpu(cqe->sl_vid);

						__vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
					}

					if (dev->features & NETIF_F_RXHASH)
						skb_set_hash(gro_skb,
							     be32_to_cpu(cqe->immed_rss_invalid),
							     PKT_HASH_TYPE_L3);

					skb_record_rx_queue(gro_skb, cq->ring);
					skb_mark_napi_id(gro_skb, &cq->napi);

					if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
						timestamp = mlx4_en_get_cqe_ts(cqe);
						mlx4_en_fill_hwtstamps(mdev,
								       skb_hwtstamps(gro_skb),
								       timestamp);
					}

					napi_gro_frags(&cq->napi);
					goto next;
				}

				/* GRO not possible, complete processing here */
				ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				ip_summed = CHECKSUM_NONE;
				ring->csum_none++;
			}
		} else {
			ip_summed = CHECKSUM_NONE;
			ring->csum_none++;
		}

		skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
		if (!skb) {
			priv->stats.rx_dropped++;
			goto next;
		}

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, skb);
			goto next;
		}

		skb->ip_summed = ip_summed;
		skb->protocol = eth_type_trans(skb, dev);
		skb_record_rx_queue(skb, cq->ring);

		if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
			skb->csum_level = 1;

		if (dev->features & NETIF_F_RXHASH)
			skb_set_hash(skb,
				     be32_to_cpu(cqe->immed_rss_invalid),
				     PKT_HASH_TYPE_L3);

		if ((be32_to_cpu(cqe->vlan_my_qpn) &
		    MLX4_CQE_VLAN_PRESENT_MASK) &&
		    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));

		if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
			timestamp = mlx4_en_get_cqe_ts(cqe);
			mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
					       timestamp);
		}

		skb_mark_napi_id(skb, &cq->napi);

		if (!mlx4_en_cq_busy_polling(cq))
			napi_gro_receive(&cq->napi, skb);
		else
			netif_receive_skb(skb);

next:
		for (nr = 0; nr < priv->num_frags; nr++)
			mlx4_en_free_frag(priv, frags, nr);

		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
		if (++polled == budget)
			goto out;
	}

out:
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	mlx4_en_refill_rx_buffers(priv, ring);
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}


void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (priv->port_up)
		napi_schedule(&cq->napi);
	else
		mlx4_en_arm_cq(priv, cq);
}

/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int done;

	if (!mlx4_en_cq_lock_napi(cq))
		return budget;

	done = mlx4_en_process_rx_cq(dev, cq, budget);

	mlx4_en_cq_unlock_napi(cq);

	/* If we used up all the quota - we're probably not done yet... */
	if (done == budget) {
		int cpu_curr;
		const struct cpumask *aff;

		INC_PERF_COUNTER(priv->pstats.napi_quota);

		cpu_curr = smp_processor_id();
		aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;

		if (unlikely(!cpumask_test_cpu(cpu_curr, aff))) {
			/* Current cpu is not according to smp_irq_affinity -
			 * probably affinity changed. need to stop this NAPI
			 * poll, and restart it on the right CPU
			 */
			napi_complete(napi);
			mlx4_en_arm_cq(priv, cq);
			return 0;
		}
	} else {
		/* Done for now */
		napi_complete(napi);
		mlx4_en_arm_cq(priv, cq);
	}
	return done;
}
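
/* Build the RX scatter-list for the current MTU.  Illustrative numbers,
 * assuming the conventional 1536-byte FRAG_SZ0: a 1500-byte MTU yields
 * eff_mtu = 1518, which fits in the first fragment alone (num_frags = 1),
 * while a 9000-byte jumbo MTU spans multiple fragments, with only the
 * last one trimmed to the remainder.
 */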

static const int frag_sizes[] = {
	FRAG_SZ0,
	FRAG_SZ1,
	FRAG_SZ2,
	FRAG_SZ3
};

void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN;
	int buf_size = 0;
	int i = 0;

	while (buf_size < eff_mtu) {
		priv->frag_info[i].frag_size =
			(eff_mtu > buf_size + frag_sizes[i]) ?
				frag_sizes[i] : eff_mtu - buf_size;
		priv->frag_info[i].frag_prefix_size = buf_size;
		if (!i) {
			priv->frag_info[i].frag_align = NET_IP_ALIGN;
			priv->frag_info[i].frag_stride =
				ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
		} else {
			priv->frag_info[i].frag_align = 0;
			priv->frag_info[i].frag_stride =
				ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
		}
		buf_size += priv->frag_info[i].frag_size;
		i++;
	}

	priv->num_frags = i;
	priv->rx_skb_size = eff_mtu;
	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));

	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
	       eff_mtu, priv->num_frags);
	for (i = 0; i < priv->num_frags; i++) {
		en_err(priv,
		       "  frag:%d - size:%d prefix:%d align:%d stride:%d\n",
		       i,
		       priv->frag_info[i].frag_size,
		       priv->frag_info[i].frag_prefix_size,
		       priv->frag_info[i].frag_align,
		       priv->frag_info[i].frag_stride);
	}
}

/* RSS related functions */

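/* Bring a single RSS target QP from reset to ready and bind it to its
 * ring's CQ.  When the FW can keep the FCS, stripping is cancelled here
 * and ring->fcs_del records the bytes to trim during completion handling.
 */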
static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
				 struct mlx4_en_rx_ring *ring,
				 enum mlx4_qp_state *state,
				 struct mlx4_qp *qp)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_qp_context *context;
	int err = 0;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate qp #%x\n", qpn);
		goto out;
	}
	qp->event = mlx4_en_sqp_event;

	memset(context, 0, sizeof *context);
	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
				qpn, ring->cqn, -1, context);
	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);

	/* Cancel FCS removal if FW allows */
	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
		context->param3 |= cpu_to_be32(1 << 29);
		ring->fcs_del = ETH_FCS_LEN;
	} else
		ring->fcs_del = 0;

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
	if (err) {
		mlx4_qp_remove(mdev->dev, qp);
		mlx4_qp_free(mdev->dev, qp);
	}
	mlx4_en_update_rx_prod_db(ring);
out:
	kfree(context);
	return err;
}

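/* The "drop" QP is a sink: flow steering rules may target it so that the
 * HW discards matching traffic without consuming RX ring resources.
 */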
int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
	int err;
	u32 qpn;

	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn);
	if (err) {
		en_err(priv, "Failed reserving drop qpn\n");
		return err;
	}
	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed allocating drop qp\n");
		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
		return err;
	}

	return 0;
}

void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
	u32 qpn;

	qpn = priv->drop_qp.qpn;
	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}

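/* Set up the full RSS machinery: one target QP per RX ring plus an
 * indirection QP whose context spreads flows over ilog2(rss_rings) rings
 * using a Toeplitz hash (MLX4_RSS_HASH_TOP) keyed with the fixed table
 * below.
 */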
/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_rss_context *rss_context;
	int rss_rings;
	void *ptr;
	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
			MLX4_RSS_TCP_IPV6);
	int i, qpn;
	int err = 0;
	int good_qps = 0;
	static const u32 rsskey[10] = { 0xD181C62C, 0xF7F4DB5B, 0x1983A2FC,
				0x943E1ADB, 0xD9389E6B, 0xD1039C2C, 0xA74499AD,
				0x593D56D9, 0xF3253C06, 0x2ADC1FFC};

	en_dbg(DRV, priv, "Configuring rss steering\n");
	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
				    priv->rx_ring_num,
				    &rss_map->base_qpn);
	if (err) {
		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
		return err;
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp, GFP_KERNEL);
	if (err) {
		en_err(priv, "Failed to allocate RSS indirection QP\n");
		goto rss_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0]->cqn, -1, &context);

	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
		rss_rings = priv->rx_ring_num;
	else
		rss_rings = priv->prof->rss_rings;

	ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
					+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
	rss_context = ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	if (priv->mdev->profile.udp_rss) {
		rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
		rss_context->base_qpn_udp = rss_context->default_qpn;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n");
		rss_mask |= MLX4_RSS_BY_INNER_HEADERS;
	}

	rss_context->flags = rss_mask;
	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
	for (i = 0; i < 10; i++)
		rss_context->rss_key[i] = cpu_to_be32(rsskey[i]);

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
	return err;
}

void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);

	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}