/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "rx.h"
#include "efx.h"
#include "falcon.h"
#include "selftest.h"
#include "workarounds.h"

/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH  8

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  64u

/*
 * rx_alloc_method - RX buffer allocation method
 *
 * This driver supports two methods for allocating and using RX buffers:
 * each RX buffer may be backed by an skb or by an order-n page.
 *
 * When LRO is in use then the second method has a lower overhead,
 * since we don't have to allocate then free skbs on reassembled frames.
 *
 * Values:
 *   - RX_ALLOC_METHOD_AUTO = 0
 *   - RX_ALLOC_METHOD_SKB  = 1
 *   - RX_ALLOC_METHOD_PAGE = 2
 *
 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
 * controlled by the parameters below.
 *
 *   - Since pushing and popping descriptors are separated by the rx_queue
 *     size, the watermarks should be ~rxd_size.
 *   - The performance win from using page-based allocation for LRO is less
 *     than the performance hit of using page-based allocation for non-LRO
 *     traffic, so the watermarks should reflect this.
 *
 * Per channel we maintain a single variable, updated by each channel:
 *
 *   rx_alloc_level += (lro_performed ? RX_ALLOC_FACTOR_LRO :
 *                      RX_ALLOC_FACTOR_SKB)
 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
 * limits the hysteresis), and update the allocation strategy:
 *
 *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
 *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
 */
static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;

#define RX_ALLOC_LEVEL_LRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_LRO 1
#define RX_ALLOC_FACTOR_SKB (-2)
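
/* Illustrative example (not part of the driver logic): with the factors
 * above, a channel whose received packets are consistently merged by the
 * LRO/GRO engine gains +1 per packet and climbs towards RX_ALLOC_LEVEL_MAX,
 * crossing RX_ALLOC_LEVEL_LRO and switching to page-based buffers.  A
 * channel whose packets are all delivered as ordinary skbs loses 2 per
 * packet and settles at 0, staying with skb-based buffers.  The counter is
 * updated in efx_rx_packet_lro() and clamped in efx_rx_strategy() below.
 */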

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold = 90;

/* This is the percentage fill level to which an RX queue will be refilled
 * when the "RX refill threshold" is reached.
 */
static unsigned int rx_refill_limit = 95;
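
/* Worked example (assuming a 4096-entry RX descriptor ring; the ring size
 * is an assumption for illustration): efx_init_rx_queue() computes
 * max_fill = 4096 - EFX_RXD_HEAD_ROOM = 4094, a fast fill trigger of
 * 4094 * 90 / 100 = 3684 and a fast fill limit of 4094 * 95 / 100 = 3889.
 * Refilling starts once the fill level drops below the trigger and tops
 * the ring back up towards the limit.
 */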

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow and at least 2 to allow
 * pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM 2

static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
{
	/* Offset is always within one page, so we don't need to consider
	 * the page order.
	 */
	return (__force unsigned long) buf->data & (PAGE_SIZE - 1);
}

static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
	return PAGE_SIZE << efx->rx_buffer_order;
}

/**
 * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
 *
 * @rx_queue:		Efx RX queue
 * @rx_buf:		RX buffer structure to populate
 *
 * This allocates memory for a new receive buffer, maps it for DMA,
 * and populates a struct efx_rx_buffer with the relevant
 * information.  Return a negative error code or 0 on success.
 */
static int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
				  struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = rx_queue->efx;
	struct net_device *net_dev = efx->net_dev;
	int skb_len = efx->rx_buffer_len;

	rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
	if (unlikely(!rx_buf->skb))
		return -ENOMEM;

	/* Adjust the SKB for padding and checksum */
	skb_reserve(rx_buf->skb, NET_IP_ALIGN);
	rx_buf->len = skb_len - NET_IP_ALIGN;
	rx_buf->data = (char *)rx_buf->skb->data;
	rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;

	rx_buf->dma_addr = pci_map_single(efx->pci_dev,
					  rx_buf->data, rx_buf->len,
					  PCI_DMA_FROMDEVICE);

	if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) {
		dev_kfree_skb_any(rx_buf->skb);
		rx_buf->skb = NULL;
		return -EIO;
	}

	return 0;
}

/**
 * efx_init_rx_buffer_page - create new RX buffer using page-based allocation
 *
 * @rx_queue:		Efx RX queue
 * @rx_buf:		RX buffer structure to populate
 *
 * This allocates memory for a new receive buffer, maps it for DMA,
 * and populates a struct efx_rx_buffer with the relevant
 * information.  Return a negative error code or 0 on success.
 */
static int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
				   struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = rx_queue->efx;
	int bytes, space, offset;

	bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;

	/* If there is space left in the previously allocated page,
	 * then use it. Otherwise allocate a new one */
	rx_buf->page = rx_queue->buf_page;
	if (rx_buf->page == NULL) {
		dma_addr_t dma_addr;

		rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
					   efx->rx_buffer_order);
		if (unlikely(rx_buf->page == NULL))
			return -ENOMEM;

		dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
					0, efx_rx_buf_size(efx),
					PCI_DMA_FROMDEVICE);

		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
			__free_pages(rx_buf->page, efx->rx_buffer_order);
			rx_buf->page = NULL;
			return -EIO;
		}

		rx_queue->buf_page = rx_buf->page;
		rx_queue->buf_dma_addr = dma_addr;
		rx_queue->buf_data = (page_address(rx_buf->page) +
				      EFX_PAGE_IP_ALIGN);
	}

	rx_buf->len = bytes;
	rx_buf->data = rx_queue->buf_data;
	offset = efx_rx_buf_offset(rx_buf);
	rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;

	/* Try to pack multiple buffers per page */
	if (efx->rx_buffer_order == 0) {
		/* The next buffer starts on the next 512 byte boundary */
		rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
		offset += ((bytes + 0x1ff) & ~0x1ff);

		space = efx_rx_buf_size(efx) - offset;
		if (space >= bytes) {
			/* Refs dropped on kernel releasing each skb */
			get_page(rx_queue->buf_page);
			goto out;
		}
	}

	/* This is the final RX buffer for this page, so mark it for
	 * unmapping */
	rx_queue->buf_page = NULL;
	rx_buf->unmap_addr = rx_queue->buf_dma_addr;

 out:
	return 0;
}
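
/* Packing example (figures are assumptions for illustration): with 4 KB
 * pages, order-0 allocation and an rx_buffer_len of roughly 1600 bytes,
 * the first buffer starts at EFX_PAGE_IP_ALIGN and the next one starts on
 * the following 512-byte boundary, so two buffers share each page.  While
 * further buffers will follow on the same page, get_page() takes an extra
 * reference, so the page is only returned once every buffer carved from
 * it has been freed.
 */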

/* This allocates memory for a new receive buffer, maps it for DMA,
 * and populates a struct efx_rx_buffer with the relevant
 * information.
 */
static int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
			      struct efx_rx_buffer *new_rx_buf)
{
	int rc = 0;

	if (rx_queue->channel->rx_alloc_push_pages) {
		new_rx_buf->skb = NULL;
		rc = efx_init_rx_buffer_page(rx_queue, new_rx_buf);
		rx_queue->alloc_page_count++;
	} else {
		new_rx_buf->page = NULL;
		rc = efx_init_rx_buffer_skb(rx_queue, new_rx_buf);
		rx_queue->alloc_skb_count++;
	}

	if (unlikely(rc < 0))
		EFX_LOG_RL(rx_queue->efx, "%s RXQ[%d] =%d\n", __func__,
			   rx_queue->queue, rc);
	return rc;
}

static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->page) {
		EFX_BUG_ON_PARANOID(rx_buf->skb);
		if (rx_buf->unmap_addr) {
			pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
				       efx_rx_buf_size(efx),
				       PCI_DMA_FROMDEVICE);
			rx_buf->unmap_addr = 0;
		}
	} else if (likely(rx_buf->skb)) {
		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
				 rx_buf->len, PCI_DMA_FROMDEVICE);
	}
}

static void efx_free_rx_buffer(struct efx_nic *efx,
			       struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->page) {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
	} else if (likely(rx_buf->skb)) {
		dev_kfree_skb_any(rx_buf->skb);
		rx_buf->skb = NULL;
	}
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
	efx_free_rx_buffer(rx_queue->efx, rx_buf);
}

/**
 * __efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 * @retry:              Recheck the fill level
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@fast_fill_limit. If there is insufficient atomic
 * memory to do so, the caller should retry.
 */
static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
					  int retry)
{
	struct efx_rx_buffer *rx_buf;
	unsigned fill_level, index;
	int i, space, rc = 0;

	/* Calculate current fill level.  Do this outside the lock,
	 * because most of the time we'll end up not wanting to do the
	 * fill anyway.
	 */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);

	/* Don't fill if we don't need to */
	if (fill_level >= rx_queue->fast_fill_trigger)
		return 0;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	/* Acquire RX add lock.  If this lock is contended, then a fast
	 * fill must already be in progress (e.g. in the refill
	 * tasklet), so we don't need to do anything
	 */
	if (!spin_trylock_bh(&rx_queue->add_lock))
		return -1;

 retry:
	/* Recalculate current fill level now that we have the lock */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
	space = rx_queue->fast_fill_limit - fill_level;
	if (space < EFX_RX_BATCH)
		goto out_unlock;

	EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
		  " level %d to level %d using %s allocation\n",
		  rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
		  rx_queue->channel->rx_alloc_push_pages ? "page" : "skb");

	do {
		for (i = 0; i < EFX_RX_BATCH; ++i) {
			index = rx_queue->added_count & EFX_RXQ_MASK;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rc = efx_init_rx_buffer(rx_queue, rx_buf);
			if (unlikely(rc))
				goto out;
			++rx_queue->added_count;
		}
	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

	EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring "
		  "to level %d\n", rx_queue->queue,
		  rx_queue->added_count - rx_queue->removed_count);

 out:
	/* Send write pointer to card. */
	falcon_notify_rx_desc(rx_queue);

	/* If the fast fill is running from inside the refill tasklet, then
	 * for SMP systems it may be running on a different CPU to
	 * RX event processing, which means that the fill level may now be
	 * out of date. */
	if (unlikely(retry && (rc == 0)))
		goto retry;

 out_unlock:
	spin_unlock_bh(&rx_queue->add_lock);

	return rc;
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@fast_fill_limit.  If there is insufficient memory to do so,
 * it will schedule a work item to immediately continue the fast fill.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	int rc;

	rc = __efx_fast_push_rx_descriptors(rx_queue, 0);
	if (unlikely(rc)) {
		/* Schedule the work item to run immediately. The hope is
		 * that work is immediately pending to free some memory
		 * (e.g. an RX event or TX completion)
		 */
		efx_schedule_slow_fill(rx_queue, 0);
	}
}

void efx_rx_work(struct work_struct *data)
{
	struct efx_rx_queue *rx_queue;
	int rc;

	rx_queue = container_of(data, struct efx_rx_queue, work.work);

	if (unlikely(!rx_queue->channel->enabled))
		return;

	EFX_TRACE(rx_queue->efx, "RX queue %d worker thread executing on CPU "
		  "%d\n", rx_queue->queue, raw_smp_processor_id());

	++rx_queue->slow_fill_count;
	/* Push new RX descriptors, allowing at least 1 jiffy for
	 * the kernel to free some more memory. */
	rc = __efx_fast_push_rx_descriptors(rx_queue, 1);
	if (rc)
		efx_schedule_slow_fill(rx_queue, 1);
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len, bool *discard,
				     bool *leak_packet)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	*discard = true;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		EFX_ERR_RL(efx, " RX queue %d seriously overlength "
			   "RX event (0x%x > 0x%x+0x%x). Leaking\n",
			   rx_queue->queue, len, max_len,
			   efx->type->rx_buffer_padding);
		/* If this buffer was skb-allocated, then the meta
		 * data at the end of the skb will be trashed. So
		 * we have no choice but to leak the fragment.
		 */
		*leak_packet = (rx_buf->skb != NULL);
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		EFX_ERR_RL(efx, " RX queue %d overlength RX event "
			   "(0x%x > 0x%x)\n", rx_queue->queue, len, max_len);
	}

	rx_queue->channel->n_rx_overlength++;
}

/* Pass a received packet up through the generic LRO stack
 *
 * Handles driverlink veto, and passes the fragment up via
 * the appropriate LRO method
 */
static void efx_rx_packet_lro(struct efx_channel *channel,
			      struct efx_rx_buffer *rx_buf,
			      bool checksummed)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;

	/* Pass the skb/page into the LRO engine */
	if (rx_buf->page) {
		struct page *page = rx_buf->page;
		struct sk_buff *skb;

		EFX_BUG_ON_PARANOID(rx_buf->skb);
		rx_buf->page = NULL;

		skb = napi_get_frags(napi);
		if (!skb) {
			put_page(page);
			return;
		}

		skb_shinfo(skb)->frags[0].page = page;
		skb_shinfo(skb)->frags[0].page_offset =
			efx_rx_buf_offset(rx_buf);
		skb_shinfo(skb)->frags[0].size = rx_buf->len;
		skb_shinfo(skb)->nr_frags = 1;

		skb->len = rx_buf->len;
		skb->data_len = rx_buf->len;
		skb->truesize += rx_buf->len;
		skb->ip_summed =
			checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;

		skb_record_rx_queue(skb, channel->channel);

		gro_result = napi_gro_frags(napi);
	} else {
		struct sk_buff *skb = rx_buf->skb;

		EFX_BUG_ON_PARANOID(!skb);
		EFX_BUG_ON_PARANOID(!checksummed);
		rx_buf->skb = NULL;

		gro_result = napi_gro_receive(napi, skb);
	}

	if (gro_result == GRO_NORMAL) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
	} else if (gro_result != GRO_DROP) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
		channel->irq_mod_score += 2;
	}
}

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int len, bool checksummed, bool discard)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	bool leak_packet = false;

	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_BUG_ON_PARANOID(!rx_buf->data);
	EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
	EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));

	/* This allows the refill path to post another buffer.
	 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
	 * isn't overwritten yet.
	 */
	rx_queue->removed_count++;

	/* Validate the length encoded in the event vs the descriptor pushed */
	efx_rx_packet__check_len(rx_queue, rx_buf, len,
				 &discard, &leak_packet);

	EFX_TRACE(efx, "RX queue %d received id %x at %llx+%x %s%s\n",
		  rx_queue->queue, index,
		  (unsigned long long)rx_buf->dma_addr, len,
		  (checksummed ? " [SUMMED]" : ""),
		  (discard ? " [DISCARD]" : ""));

	/* Discard packet, if instructed to do so */
	if (unlikely(discard)) {
		if (unlikely(leak_packet))
			rx_queue->channel->n_skbuff_leaks++;
		else
			/* We haven't called efx_unmap_rx_buffer yet,
			 * so fini the entire rx_buffer here */
			efx_fini_rx_buffer(rx_queue, rx_buf);
		return;
	}

	/* Release card resources - assumes all RX buffers consumed in-order
	 * per RX queue
	 */
	efx_unmap_rx_buffer(efx, rx_buf);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(rx_buf->data);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	rx_buf->len = len;
	if (rx_queue->channel->rx_pkt)
		__efx_rx_packet(rx_queue->channel,
				rx_queue->channel->rx_pkt,
				rx_queue->channel->rx_pkt_csummed);
	rx_queue->channel->rx_pkt = rx_buf;
	rx_queue->channel->rx_pkt_csummed = checksummed;
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel,
		     struct efx_rx_buffer *rx_buf, bool checksummed)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
		efx_free_rx_buffer(efx, rx_buf);
		return;
	}

	if (rx_buf->skb) {
		prefetch(skb_shinfo(rx_buf->skb));

		skb_put(rx_buf->skb, rx_buf->len);

		/* Move past the ethernet header. rx_buf->data still points
		 * at the ethernet header */
		rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
						       efx->net_dev);

		skb_record_rx_queue(rx_buf->skb, channel->channel);
	}

	if (likely(checksummed || rx_buf->page)) {
		efx_rx_packet_lro(channel, rx_buf, checksummed);
		return;
	}

	/* We now own the SKB */
	skb = rx_buf->skb;
	rx_buf->skb = NULL;
	EFX_BUG_ON_PARANOID(!skb);

	/* Set the SKB flags */
	skb->ip_summed = CHECKSUM_NONE;

	/* Pass the packet up */
	netif_receive_skb(skb);

	/* Update allocation strategy method */
	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
}

void efx_rx_strategy(struct efx_channel *channel)
{
	enum efx_rx_alloc_method method = rx_alloc_method;

	/* Only makes sense to use page based allocation if LRO is enabled */
	if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
		method = RX_ALLOC_METHOD_SKB;
	} else if (method == RX_ALLOC_METHOD_AUTO) {
		/* Constrain the rx_alloc_level */
		if (channel->rx_alloc_level < 0)
			channel->rx_alloc_level = 0;
		else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
			channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;

		/* Decide on the allocation method */
		method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO) ?
			  RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
	}

	/* Push the option */
	channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int rxq_size;
	int rc;

	EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);

	/* Allocate RX buffers */
	rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
	rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = falcon_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}
	return rc;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	unsigned int max_fill, trigger, limit;

	EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	rx_queue->min_overfill = -1U;

	/* Initialise limit fields */
	max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM;
	trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
	limit = max_fill * min(rx_refill_limit, 100U) / 100U;

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->fast_fill_limit = limit;

	/* Set up RX descriptor ring */
	falcon_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_rx_buffer *rx_buf;

	EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);

	falcon_fini_rx(rx_queue);

	/* Release RX buffers NB start at index 0 not current HW ptr */
	if (rx_queue->buffer) {
		for (i = 0; i <= EFX_RXQ_MASK; i++) {
			rx_buf = efx_rx_buffer(rx_queue, i);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	/* For a page that is part-way through splitting into RX buffers */
	if (rx_queue->buf_page != NULL) {
		pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
			       efx_rx_buf_size(rx_queue->efx),
			       PCI_DMA_FROMDEVICE);
		__free_pages(rx_queue->buf_page,
			     rx_queue->efx->rx_buffer_order);
		rx_queue->buf_page = NULL;
	}
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue);

	falcon_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}


module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring fast/slow fill threshold (%)");