/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH  8

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  64u

/*
 * rx_alloc_method - RX buffer allocation method
 *
 * This driver supports two methods for allocating and using RX buffers:
 * each RX buffer may be backed by an skb or by an order-n page.
 *
 * When LRO is in use, the second method has a lower overhead,
 * since we don't have to allocate and then free skbs on reassembled frames.
 *
 * Values:
 *   - RX_ALLOC_METHOD_AUTO = 0
 *   - RX_ALLOC_METHOD_SKB  = 1
 *   - RX_ALLOC_METHOD_PAGE = 2
 *
 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
 * controlled by the parameters below.
 *
 *   - Pushing and popping descriptors are separated by the rx_queue
 *     size, so the watermarks should be ~rxd_size.
 *   - The performance win from using page-based allocation for LRO is
 *     less than the performance hit of using page-based allocation for
 *     non-LRO traffic, so the watermarks should reflect this.
 *
 * Each channel maintains a single variable, rx_alloc_level, updated as
 * packets are received:
 *
 *   rx_alloc_level += (lro_performed ? RX_ALLOC_FACTOR_LRO :
 *                      RX_ALLOC_FACTOR_SKB)
 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
 * limits the hysteresis), and update the allocation strategy:
 *
 *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
 *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
 */
static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;

#define RX_ALLOC_LEVEL_LRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_LRO 1
#define RX_ALLOC_FACTOR_SKB (-2)
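
/* Illustrative arithmetic (a rough sketch, not a guarantee): each packet
 * that GRO merges or holds adds RX_ALLOC_FACTOR_LRO (+1) to
 * rx_alloc_level, while each packet delivered as an ordinary skb adds
 * RX_ALLOC_FACTOR_SKB (-2).  Starting from zero, it therefore takes on
 * the order of RX_ALLOC_LEVEL_LRO (0x2000 = 8192) merged packets before
 * efx_rx_strategy() switches a channel to page-based allocation, and
 * roughly half as many skb-delivered packets to switch it back.
 */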

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold = 90;

/* This is the percentage fill level to which an RX queue will be refilled
 * when the "RX refill threshold" is reached.
 */
static unsigned int rx_refill_limit = 95;
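
/* Worked example (illustrative, derived from efx_init_rx_queue() below):
 * max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM,
 * fast_fill_trigger = max_fill * rx_refill_threshold / 100,
 * fast_fill_limit = max_fill * rx_refill_limit / 100.
 * With the defaults above, refilling starts once the ring falls below
 * ~90% of max_fill and tops it back up to ~95%.
 */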

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow and at least 2 to allow
 * pipelined receives. Then a further 1 because efx_recycle_rx_buffer()
 * might insert two buffers.
 */
#define EFX_RXD_HEAD_ROOM 3

static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
{
	/* Offset is always within one page, so we don't need to consider
	 * the page order.
	 */
	return (__force unsigned long) buf->data & (PAGE_SIZE - 1);
}
static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
	return PAGE_SIZE << efx->rx_buffer_order;
}

/**
 * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
 * struct efx_rx_buffer for each one. Returns a negative error code or 0
 * on success. It may fail part-way through, having inserted fewer than
 * EFX_RX_BATCH buffers.
 */
static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct net_device *net_dev = efx->net_dev;
	struct efx_rx_buffer *rx_buf;
	int skb_len = efx->rx_buffer_len;
	unsigned index, count;

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		index = rx_queue->added_count & EFX_RXQ_MASK;
		rx_buf = efx_rx_buffer(rx_queue, index);

		rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
		if (unlikely(!rx_buf->skb))
			return -ENOMEM;
		rx_buf->page = NULL;

		/* Adjust the SKB for padding and checksum */
		skb_reserve(rx_buf->skb, NET_IP_ALIGN);
		rx_buf->len = skb_len - NET_IP_ALIGN;
		rx_buf->data = (char *)rx_buf->skb->data;
		rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;

		rx_buf->dma_addr = pci_map_single(efx->pci_dev,
						  rx_buf->data, rx_buf->len,
						  PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev,
						   rx_buf->dma_addr))) {
			dev_kfree_skb_any(rx_buf->skb);
			rx_buf->skb = NULL;
			return -EIO;
		}

		++rx_queue->added_count;
		++rx_queue->alloc_skb_count;
	}

	return 0;
}

/**
 * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
 * and populates a struct efx_rx_buffer for each one. Returns a negative
 * error code or 0 on success. If a single page can be split between two
 * buffers, then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	char *page_addr;
	dma_addr_t dma_addr;
	unsigned index, count;

	/* We can split a page between two buffers */
	BUILD_BUG_ON(EFX_RX_BATCH & 1);

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
				   efx->rx_buffer_order);
		if (unlikely(page == NULL))
			return -ENOMEM;
		dma_addr = pci_map_page(efx->pci_dev, page, 0,
					efx_rx_buf_size(efx),
					PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
			__free_pages(page, efx->rx_buffer_order);
			return -EIO;
		}
		EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1));
		page_addr = page_address(page) + EFX_PAGE_IP_ALIGN;
		dma_addr += EFX_PAGE_IP_ALIGN;

	split:
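		/* Place a buffer at the current dma_addr/page_addr; if two
		 * buffers fit in a page, the code below takes a second
		 * reference on the page and jumps back to "split:" with
		 * dma_addr and page_addr advanced by half a page. */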
		index = rx_queue->added_count & EFX_RXQ_MASK;
		rx_buf = efx_rx_buffer(rx_queue, index);
		rx_buf->dma_addr = dma_addr;
		rx_buf->skb = NULL;
		rx_buf->page = page;
		rx_buf->data = page_addr;
		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
		++rx_queue->added_count;
		++rx_queue->alloc_page_count;

		if ((~count & 1) && (efx->rx_buffer_len < (PAGE_SIZE >> 1))) {
			/* Use the second half of the page */
			get_page(page);
			dma_addr += (PAGE_SIZE >> 1);
			page_addr += (PAGE_SIZE >> 1);
			++count;
			goto split;
		}
	}

	return 0;
}

static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->page) {
		EFX_BUG_ON_PARANOID(rx_buf->skb);

		/* Unmap the buffer if there's only one buffer per page(s),
		 * or this is the second half of a two-buffer page. */
		if (efx->rx_buffer_order != 0 ||
		    (efx_rx_buf_offset(rx_buf) & (PAGE_SIZE >> 1)) != 0) {
			pci_unmap_page(efx->pci_dev,
				       rx_buf->dma_addr & ~(PAGE_SIZE - 1),
				       efx_rx_buf_size(efx),
				       PCI_DMA_FROMDEVICE);
		}
	} else if (likely(rx_buf->skb)) {
		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
				 rx_buf->len, PCI_DMA_FROMDEVICE);
	}
}

static void efx_free_rx_buffer(struct efx_nic *efx,
			       struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->page) {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
	} else if (likely(rx_buf->skb)) {
		dev_kfree_skb_any(rx_buf->skb);
		rx_buf->skb = NULL;
	}
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
	efx_free_rx_buffer(rx_queue->efx, rx_buf);
}

/* Attempt to resurrect the other receive buffer that used to share this page,
 * which had previously been passed up to the kernel and freed. */
static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
				    struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_buffer *new_buf;
	unsigned index;

	/* We could have recycled the 1st half, then refilled
	 * the queue, and now recycle the 2nd half.
	 * EFX_RXD_HEAD_ROOM ensures that there is always room
	 * to reinsert two buffers (once). */
	get_page(rx_buf->page);

	index = rx_queue->added_count & EFX_RXQ_MASK;
	new_buf = efx_rx_buffer(rx_queue, index);
	new_buf->dma_addr = rx_buf->dma_addr - (PAGE_SIZE >> 1);
	new_buf->skb = NULL;
	new_buf->page = rx_buf->page;
	new_buf->data = rx_buf->data - (PAGE_SIZE >> 1);
	new_buf->len = rx_buf->len;
	++rx_queue->added_count;
}

/* Recycle the given rx buffer directly back into the rx_queue. There is
 * always room to add this buffer, because we've just popped a buffer. */
static void efx_recycle_rx_buffer(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue = &efx->rx_queue[channel->channel];
	struct efx_rx_buffer *new_buf;
	unsigned index;

	if (rx_buf->page != NULL && efx->rx_buffer_len < (PAGE_SIZE >> 1)) {
		if (efx_rx_buf_offset(rx_buf) & (PAGE_SIZE >> 1)) {
			/* This is the 2nd half of a page split between two
			 * buffers.  If page_count() is > 1 then the kernel
			 * is still holding onto the previous buffer */
			if (page_count(rx_buf->page) != 1) {
				efx_fini_rx_buffer(rx_queue, rx_buf);
				return;
			}

			efx_resurrect_rx_buffer(rx_queue, rx_buf);
		} else {
			/* Free the 1st buffer's reference on the page. If the
			 * 2nd buffer is also discarded, this buffer will be
			 * resurrected above */
			put_page(rx_buf->page);
			rx_buf->page = NULL;
			return;
		}
	}

	index = rx_queue->added_count & EFX_RXQ_MASK;
	new_buf = efx_rx_buffer(rx_queue, index);

	memcpy(new_buf, rx_buf, sizeof(*new_buf));
	rx_buf->page = NULL;
	rx_buf->skb = NULL;
	++rx_queue->added_count;
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->fast_fill_limit. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	struct efx_channel *channel = rx_queue->channel;
	unsigned fill_level;
	int space, rc = 0;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	space = rx_queue->fast_fill_limit - fill_level;
	if (space < EFX_RX_BATCH)
		goto out;

	EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
		  " level %d to level %d using %s allocation\n",
		  rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
		  channel->rx_alloc_push_pages ? "page" : "skb");

	do {
		if (channel->rx_alloc_push_pages)
			rc = efx_init_rx_buffers_page(rx_queue);
		else
			rc = efx_init_rx_buffers_skb(rx_queue);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

	EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring "
		  "to level %d\n", rx_queue->queue,
		  rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
	struct efx_channel *channel = rx_queue->channel;

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(channel);
	++rx_queue->slow_fill_count;
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len, bool *discard,
				     bool *leak_packet)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	*discard = true;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		EFX_ERR_RL(efx, " RX queue %d seriously overlength "
			   "RX event (0x%x > 0x%x+0x%x). Leaking\n",
			   rx_queue->queue, len, max_len,
			   efx->type->rx_buffer_padding);
		/* If this buffer was skb-allocated, then the metadata
		 * at the end of the skb will have been trashed. So
		 * we have no choice but to leak the fragment.
		 */
		*leak_packet = (rx_buf->skb != NULL);
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		EFX_ERR_RL(efx, " RX queue %d overlength RX event "
			   "(0x%x > 0x%x)\n", rx_queue->queue, len, max_len);
	}

	rx_queue->channel->n_rx_overlength++;
}

/* Pass a received packet up through the generic LRO stack
 *
 * Handles driverlink veto, and passes the fragment up via
 * the appropriate LRO method
 */
static void efx_rx_packet_lro(struct efx_channel *channel,
			      struct efx_rx_buffer *rx_buf,
			      bool checksummed)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;

	/* Pass the skb/page into the LRO engine */
	if (rx_buf->page) {
		struct page *page = rx_buf->page;
		struct sk_buff *skb;

		EFX_BUG_ON_PARANOID(rx_buf->skb);
		rx_buf->page = NULL;

		skb = napi_get_frags(napi);
		if (!skb) {
			put_page(page);
			return;
		}

		skb_shinfo(skb)->frags[0].page = page;
		skb_shinfo(skb)->frags[0].page_offset =
			efx_rx_buf_offset(rx_buf);
		skb_shinfo(skb)->frags[0].size = rx_buf->len;
		skb_shinfo(skb)->nr_frags = 1;

		skb->len = rx_buf->len;
		skb->data_len = rx_buf->len;
		skb->truesize += rx_buf->len;
		skb->ip_summed =
			checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;

		skb_record_rx_queue(skb, channel->channel);

		gro_result = napi_gro_frags(napi);
	} else {
		struct sk_buff *skb = rx_buf->skb;

		EFX_BUG_ON_PARANOID(!skb);
		EFX_BUG_ON_PARANOID(!checksummed);
		rx_buf->skb = NULL;

		gro_result = napi_gro_receive(napi, skb);
	}

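	/* Feed the allocation heuristic: packets that GRO merged or held
	 * favour page-based allocation; packets handed straight to the
	 * stack as ordinary skbs favour skb-based allocation (see the
	 * rx_alloc_method comment above). */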
	if (gro_result == GRO_NORMAL) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
	} else if (gro_result != GRO_DROP) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
		channel->irq_mod_score += 2;
	}
}

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int len, bool checksummed, bool discard)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = rx_queue->channel;
	struct efx_rx_buffer *rx_buf;
	bool leak_packet = false;

	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_BUG_ON_PARANOID(!rx_buf->data);
	EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
	EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));

	/* This allows the refill path to post another buffer.
	 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
	 * isn't overwritten yet.
	 */
	rx_queue->removed_count++;

	/* Validate the length encoded in the event vs the descriptor pushed */
	efx_rx_packet__check_len(rx_queue, rx_buf, len,
				 &discard, &leak_packet);

	EFX_TRACE(efx, "RX queue %d received id %x at %llx+%x %s%s\n",
		  rx_queue->queue, index,
		  (unsigned long long)rx_buf->dma_addr, len,
		  (checksummed ? " [SUMMED]" : ""),
		  (discard ? " [DISCARD]" : ""));

	/* Discard packet, if instructed to do so */
	if (unlikely(discard)) {
		if (unlikely(leak_packet))
			channel->n_skbuff_leaks++;
		else
			efx_recycle_rx_buffer(channel, rx_buf);

		/* Don't hold off the previous receive */
		rx_buf = NULL;
		goto out;
	}

	/* Release card resources - assumes all RX buffers consumed in-order
	 * per RX queue
	 */
	efx_unmap_rx_buffer(efx, rx_buf);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(rx_buf->data);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	rx_buf->len = len;
out:
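	/* Pipelining: complete the packet held over from the previous call,
	 * whose payload has had time to prefetch, then stash this buffer as
	 * channel->rx_pkt for the next call to finish. */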
	if (rx_queue->channel->rx_pkt)
		__efx_rx_packet(rx_queue->channel,
				rx_queue->channel->rx_pkt,
				rx_queue->channel->rx_pkt_csummed);
	rx_queue->channel->rx_pkt = rx_buf;
	rx_queue->channel->rx_pkt_csummed = checksummed;
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel,
		     struct efx_rx_buffer *rx_buf, bool checksummed)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
		efx_free_rx_buffer(efx, rx_buf);
		return;
	}

	if (rx_buf->skb) {
		prefetch(skb_shinfo(rx_buf->skb));

		skb_put(rx_buf->skb, rx_buf->len);

		/* Move past the ethernet header. rx_buf->data still points
		 * at the ethernet header */
		rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
						       efx->net_dev);

		skb_record_rx_queue(rx_buf->skb, channel->channel);
	}

	if (likely(checksummed || rx_buf->page)) {
		efx_rx_packet_lro(channel, rx_buf, checksummed);
		return;
	}

	/* We now own the SKB */
	skb = rx_buf->skb;
	rx_buf->skb = NULL;
	EFX_BUG_ON_PARANOID(!skb);

	/* Set the SKB flags */
	skb->ip_summed = CHECKSUM_NONE;

	/* Pass the packet up */
	netif_receive_skb(skb);

	/* Update allocation strategy method */
	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
}

void efx_rx_strategy(struct efx_channel *channel)
{
	enum efx_rx_alloc_method method = rx_alloc_method;

	/* Only makes sense to use page-based allocation if GRO/LRO is enabled */
	if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
		method = RX_ALLOC_METHOD_SKB;
	} else if (method == RX_ALLOC_METHOD_AUTO) {
		/* Constrain the rx_alloc_level */
		if (channel->rx_alloc_level < 0)
			channel->rx_alloc_level = 0;
		else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
			channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;

		/* Decide on the allocation method */
		method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO) ?
			  RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
	}

	/* Push the option */
	channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int rxq_size;
	int rc;

	EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);

	/* Allocate RX buffers */
	rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
	rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}
	return rc;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	unsigned int max_fill, trigger, limit;

	EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	rx_queue->min_overfill = -1U;

	/* Initialise limit fields */
	max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM;
	trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
	limit = max_fill * min(rx_refill_limit, 100U) / 100U;

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->fast_fill_limit = limit;

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_rx_buffer *rx_buf;

	EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);

	del_timer_sync(&rx_queue->slow_fill);
	efx_nic_fini_rx(rx_queue);

	/* Release RX buffers.  NB: start at index 0, not the current HW ptr */
	if (rx_queue->buffer) {
		for (i = 0; i <= EFX_RXQ_MASK; i++) {
			rx_buf = efx_rx_buffer(rx_queue, i);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue);

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}


module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
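
/* Usage note (illustrative; the module name "sfc" is assumed here rather
 * than taken from this file): rx_alloc_method can be forced at load time,
 * e.g. "modprobe sfc rx_alloc_method=2" for page-based allocation, or
 * changed later through /sys/module/sfc/parameters/rx_alloc_method, since
 * it is registered with mode 0644.  rx_refill_threshold (below) is
 * registered with mode 0444 and so can only be set at load time.
 */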

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring fast/slow fill threshold (%)");