/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "workarounds.h"
#include "ef10_regs.h"

#ifdef EFX_USE_PIO

#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE
#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;

#endif /* EFX_USE_PIO */

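/* Helpers for locating the buffer at the TX queue's current insertion
 * point; the checked variant asserts that the slot is not still in use.
 */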
static inline unsigned int
efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
{
	return tx_queue->insert_count & tx_queue->ptr_mask;
}

static inline struct efx_tx_buffer *
__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
{
	return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
}

static inline struct efx_tx_buffer *
efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer =
		__efx_tx_queue_get_insert_buffer(tx_queue);

	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->flags);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);

	return buffer;
}

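/* Release a completed (or unwound) TX buffer: undo any DMA mapping, free an
 * attached skb or heap-allocated header, and account the completion if the
 * caller asked for it.
 */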
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		(*pkts_compl)++;
		(*bytes_compl) += buffer->skb->len;
		dev_consume_skb_any((struct sk_buff *)buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_HEAP) {
		kfree(buffer->heap_buf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);

static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1.  However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}

unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for the alignment workaround,
	 * or for option descriptors
	 */
	if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}

static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
	/* We need to consider both queues that the net core sees as one */
	struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
	struct efx_nic *efx = txq1->efx;
	unsigned int fill_level;

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries).  Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
	txq2->old_read_count = ACCESS_ONCE(txq2->read_count);

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}

#ifdef EFX_USE_PIO

struct efx_short_copy_buffer {
	int used;
	u8 buf[L1_CACHE_BYTES];
};

/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
				    u8 *data, int len,
				    struct efx_short_copy_buffer *copy_buf)
{
	int block_len = len & ~(sizeof(copy_buf->buf) - 1);

	__iowrite64_copy(*piobuf, data, block_len >> 3);
	*piobuf += block_len;
	len -= block_len;

	if (len) {
		data += block_len;
		BUG_ON(copy_buf->used);
		BUG_ON(len > sizeof(copy_buf->buf));
		memcpy(copy_buf->buf, data, len);
		copy_buf->used = len;
	}
}

/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
				       u8 *data, int len,
				       struct efx_short_copy_buffer *copy_buf)
{
	if (copy_buf->used) {
		/* if the copy buffer is partially full, fill it up and write */
		int copy_to_buf =
			min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);

		memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
		copy_buf->used += copy_to_buf;

		/* if we didn't fill it up then we're done for now */
		if (copy_buf->used < sizeof(copy_buf->buf))
			return;

		__iowrite64_copy(*piobuf, copy_buf->buf,
				 sizeof(copy_buf->buf) >> 3);
		*piobuf += sizeof(copy_buf->buf);
		data += copy_to_buf;
		len -= copy_to_buf;
		copy_buf->used = 0;
	}

	efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
}

static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
				  struct efx_short_copy_buffer *copy_buf)
{
	/* if there's anything in it, write the whole buffer, including junk */
	if (copy_buf->used)
		__iowrite64_copy(piobuf, copy_buf->buf,
				 sizeof(copy_buf->buf) >> 3);
}

/* Traverse skb structure and copy fragments in to PIO buffer.
 * Advances piobuf pointer.
 */
static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
				     u8 __iomem **piobuf,
				     struct efx_short_copy_buffer *copy_buf)
{
	int i;

	efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
				copy_buf);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u8 *vaddr;

		vaddr = kmap_atomic(skb_frag_page(f));

		efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
					   skb_frag_size(f), copy_buf);
		kunmap_atomic(vaddr);
	}

	EFX_BUG_ON_PARANOID(skb_shinfo(skb)->frag_list);
}

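/* Copy an entire packet into the PIO buffer and insert the single option
 * descriptor that tells the hardware where to find it.
 */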
static struct efx_tx_buffer *
efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_tx_buffer *buffer =
		efx_tx_queue_get_insert_buffer(tx_queue);
	u8 __iomem *piobuf = tx_queue->piobuf;

	/* Copy to PIO buffer. Ensure the writes are padded to the end
	 * of a cache line, as this is required for write-combining to be
	 * effective on at least x86.
	 */

	if (skb_shinfo(skb)->nr_frags) {
		/* The size of the copy buffer will ensure all writes
		 * are the size of a cache line.
		 */
		struct efx_short_copy_buffer copy_buf;

		copy_buf.used = 0;

		efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
					 &piobuf, &copy_buf);
		efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
	} else {
		/* Pad the write to the size of a cache line.
		 * We can do this because we know the skb_shared_info struct is
		 * after the source, and the destination buffer is big enough.
		 */
		BUILD_BUG_ON(L1_CACHE_BYTES >
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
		__iowrite64_copy(tx_queue->piobuf, skb->data,
				 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
	}

	EFX_POPULATE_QWORD_5(buffer->option,
			     ESF_DZ_TX_DESC_IS_OPT, 1,
			     ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
			     ESF_DZ_TX_PIO_CONT, 0,
			     ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
			     ESF_DZ_TX_PIO_BUF_ADDR,
			     tx_queue->piobuf_offset);
	++tx_queue->pio_packets;
	++tx_queue->insert_count;
	return buffer;
}
#endif /* EFX_USE_PIO */

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped, and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	struct efx_tx_buffer *buffer;
	unsigned int old_insert_count = tx_queue->insert_count;
	skb_frag_t *fragment;
	unsigned int len, unmap_len = 0;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	unsigned short dma_flags;
	int i = 0;

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	/* Consider using PIO for short packets */
#ifdef EFX_USE_PIO
	if (skb->len <= efx_piobuf_size && !skb->xmit_more &&
	    efx_nic_may_tx_pio(tx_queue)) {
		buffer = efx_enqueue_skb_pio(tx_queue, skb);
		dma_flags = EFX_TX_BUF_OPTION;
		goto finish_packet;
	}
#endif

	/* Map for DMA.  Use dma_map_single rather than dma_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			goto dma_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			buffer = efx_tx_queue_get_insert_buffer(tx_queue);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			buffer->flags = EFX_TX_BUF_CONT;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(fragment);
		i++;
		/* Map for DMA */
		dma_flags = 0;
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
#ifdef EFX_USE_PIO
finish_packet:
#endif
	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB | dma_flags;

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	efx_tx_maybe_stop_queue(tx_queue);

	/* Pass off to hardware */
	if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

		/* There could be packets left on the partner queue if those
		 * SKBs had skb->xmit_more set. If we do not push those they
		 * could be left for a long time and cause a netdev watchdog.
		 */
		if (txq2->xmit_more_available)
			efx_nic_push_buffers(txq2);

		efx_nic_push_buffers(tx_queue);
	} else {
		tx_queue->xmit_more_available = skb->xmit_more;
	}

	tx_queue->tx_packets++;

	return NETDEV_TX_OK;

 dma_err:
	netif_err(efx, tx_err, efx->net_dev,
		  " TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != old_insert_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, unmap_len,
				       DMA_TO_DEVICE);
	}

	return NETDEV_TX_OK;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
		    unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	/* PTP "event" packet */
	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
	    unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
		return efx_ptp_tx(efx, skb);
	}

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = efx_get_tx_queue(efx, index, type);

	return efx_enqueue_skb(tx_queue, skb);
}

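/* Associate an EFX TX queue with the core networking TX queue it serves */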
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES +
				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}

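/* Configure mqprio traffic classes for the device, initialising any
 * high-priority TX queues that become reachable as a result.
 */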
int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
		 struct tc_to_netdev *ntc)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned tc, num_tc;
	int rc;

	if (ntc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	num_tc = ntc->tc;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		efx_for_each_channel(channel, efx) {
			efx_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = efx_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					efx_init_tx_queue(tx_queue);
				efx_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to efx_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}

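/* Process TX completions up to and including the given ring index, then
 * restart the core queue if enough descriptors have been freed.
 */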
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_tx_queue *txq2;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = efx_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

/* Size of page-based TSO header buffers.  Larger blocks must be
 * allocated from the heap.
 */
#define TSOH_STD_SIZE	128
#define TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)

/* At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an skb.
 */
static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
}

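/* Allocate the software descriptor ring, TSO header pages (for offload
 * queues) and the hardware ring for a TX queue.
 */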
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
		tx_queue->tsoh_page =
			kcalloc(efx_tsoh_page_count(tx_queue),
				sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
		if (!tx_queue->tsoh_page) {
			rc = -ENOMEM;
			goto fail1;
		}
	}

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->tsoh_page);
	tx_queue->tsoh_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

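/* Reset a TX queue's counters and initialise its hardware descriptor ring */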
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
	tx_queue->xmit_more_available = false;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

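/* Release any buffers left in a TX queue and reset the core queue state */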
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	tx_queue->xmit_more_available = false;
	netdev_tx_reset_queue(tx_queue->core_txq);
}

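/* Free the hardware ring, TSO header pages and software ring of a TX queue */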
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->tsoh_page) {
		for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->tsoh_page[i]);
		kfree(tx_queue->tsoh_page);
		tx_queue->tsoh_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}


/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than the GSO.
 *
 * Requires TX checksum offload support.
 */

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))

/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
 * @protocol: Network protocol (after any VLAN header)
 * @ip_off: Offset of IP header
 * @tcp_off: Offset of TCP header
 * @header_len: Number of bytes of header
 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
 * @header_dma_addr: Header DMA address, when using option descriptors
 * @header_unmap_len: Header DMA mapped length, or 0 if not using option
 *	descriptors
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	u16 ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	unsigned short dma_flags;

	__be16 protocol;
	unsigned int ip_off;
	unsigned int tcp_off;
	unsigned header_len;
	unsigned int ip_base_len;
	dma_addr_t header_dma_addr;
	unsigned int header_unmap_len;
};


/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}
	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}

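/* Get a buffer for a TSO segment header, from the per-queue header pages
 * when it fits in a standard block and from the heap otherwise.
 */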
static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer, unsigned int len)
{
	u8 *result;

	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->flags);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);

	if (likely(len <= TSOH_STD_SIZE - NET_IP_ALIGN)) {
		unsigned index =
			(tx_queue->insert_count & tx_queue->ptr_mask) / 2;
		struct efx_buffer *page_buf =
			&tx_queue->tsoh_page[index / TSOH_PER_PAGE];
		unsigned offset =
			TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + NET_IP_ALIGN;

		if (unlikely(!page_buf->addr) &&
		    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
					 GFP_ATOMIC))
			return NULL;

		result = (u8 *)page_buf->addr + offset;
		buffer->dma_addr = page_buf->dma_addr + offset;
		buffer->flags = EFX_TX_BUF_CONT;
	} else {
		tx_queue->tso_long_headers++;

		buffer->heap_buf = kmalloc(NET_IP_ALIGN + len, GFP_ATOMIC);
		if (unlikely(!buffer->heap_buf))
			return NULL;
		result = (u8 *)buffer->heap_buf + NET_IP_ALIGN;
		buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
	}

	buffer->len = len;

	return result;
}

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue:		Efx TX queue
 * @dma_addr:		DMA address of fragment
 * @len:		Length of fragment
 * @final_buffer:	The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.
 */
static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
				dma_addr_t dma_addr, unsigned len,
				struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len;

	EFX_BUG_ON_PARANOID(len <= 0);

	while (1) {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >=
				    efx->txq_entries);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len;
		buffer->flags = EFX_TX_BUF_CONT;
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
}


/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
			      struct efx_tx_buffer *buffer, u8 *header)
{
	if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
		buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
						  header, buffer->len,
						  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
					       buffer->dma_addr))) {
			kfree(buffer->heap_buf);
			buffer->len = 0;
			buffer->flags = 0;
			return -ENOMEM;
		}
		buffer->unmap_len = buffer->len;
		buffer->dma_offset = 0;
		buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
	}

	++tx_queue->insert_count;
	return 0;
}


/* Remove buffers put into a tx_queue.  None of the buffers must have
 * an skb attached.
 */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
			       unsigned int insert_count)
{
	struct efx_tx_buffer *buffer;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != insert_count) {
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
	}
}


/* Parse the SKB header and initialise state. */
static int tso_start(struct tso_state *st, struct efx_nic *efx,
		     struct efx_tx_queue *tx_queue,
		     const struct sk_buff *skb)
{
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int header_len, in_len;
	bool use_opt_desc = false;
	dma_addr_t dma_addr;

	if (tx_queue->tso_version == 1)
		use_opt_desc = true;

	st->ip_off = skb_network_header(skb) - skb->data;
	st->tcp_off = skb_transport_header(skb) - skb->data;
	header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
	in_len = skb_headlen(skb) - header_len;
	st->header_len = header_len;
	st->in_len = in_len;
	if (st->protocol == htons(ETH_P_IP)) {
		st->ip_base_len = st->header_len - st->ip_off;
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	} else {
		st->ip_base_len = st->header_len - st->tcp_off;
		st->ipv4_id = 0;
	}
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->out_len = skb->len - header_len;

	if (!use_opt_desc) {
		st->header_unmap_len = 0;

		if (likely(in_len == 0)) {
			st->dma_flags = 0;
			st->unmap_len = 0;
			return 0;
		}

		dma_addr = dma_map_single(dma_dev, skb->data + header_len,
					  in_len, DMA_TO_DEVICE);
		st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
		st->dma_addr = dma_addr;
		st->unmap_addr = dma_addr;
		st->unmap_len = in_len;
	} else {
		dma_addr = dma_map_single(dma_dev, skb->data,
					  skb_headlen(skb), DMA_TO_DEVICE);
		st->header_dma_addr = dma_addr;
		st->header_unmap_len = skb_headlen(skb);
		st->dma_flags = 0;
		st->dma_addr = dma_addr + header_len;
		st->unmap_len = 0;
	}

	return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
}

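/* Map the next SKB fragment for DMA and point the TSO input state at it */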
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
					  skb_frag_size(frag), DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->dma_flags = 0;
		st->unmap_len = skb_frag_size(frag);
		st->in_len = skb_frag_size(frag);
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}


/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.
 */
static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					  const struct sk_buff *skb,
					  struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n;

	if (st->in_len == 0)
		return;
	if (st->packet_space == 0)
		return;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);

	if (st->out_len == 0) {
		/* Transfer ownership of the skb */
		buffer->skb = skb;
		buffer->flags = EFX_TX_BUF_SKB;
	} else if (st->packet_space != 0) {
		buffer->flags = EFX_TX_BUF_CONT;
	}

	if (st->in_len == 0) {
		/* Transfer ownership of the DMA mapping */
		buffer->unmap_len = st->unmap_len;
		buffer->dma_offset = buffer->unmap_len - buffer->len;
		buffer->flags |= st->dma_flags;
		st->unmap_len = 0;
	}

	st->dma_addr += n;
}


/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -%ENOMEM if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tx_buffer *buffer =
		efx_tx_queue_get_insert_buffer(tx_queue);
	bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
	u8 tcp_flags_clear;

	if (!is_last) {
		st->packet_space = skb_shinfo(skb)->gso_size;
		tcp_flags_clear = 0x09; /* mask out FIN and PSH */
	} else {
		st->packet_space = st->out_len;
		tcp_flags_clear = 0x00;
	}

	if (!st->header_unmap_len) {
		/* Allocate and insert a DMA-mapped header buffer. */
		struct tcphdr *tsoh_th;
		unsigned ip_length;
		u8 *header;
		int rc;

		header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
		if (!header)
			return -ENOMEM;

		tsoh_th = (struct tcphdr *)(header + st->tcp_off);

		/* Copy and update the headers. */
		memcpy(header, skb->data, st->header_len);

		tsoh_th->seq = htonl(st->seqnum);
		((u8 *)tsoh_th)[13] &= ~tcp_flags_clear;

		ip_length = st->ip_base_len + st->packet_space;

		if (st->protocol == htons(ETH_P_IP)) {
			struct iphdr *tsoh_iph =
				(struct iphdr *)(header + st->ip_off);

			tsoh_iph->tot_len = htons(ip_length);
			tsoh_iph->id = htons(st->ipv4_id);
		} else {
			struct ipv6hdr *tsoh_iph =
				(struct ipv6hdr *)(header + st->ip_off);

			tsoh_iph->payload_len = htons(ip_length);
		}

		rc = efx_tso_put_header(tx_queue, buffer, header);
		if (unlikely(rc))
			return rc;
	} else {
		/* Send the original headers with a TSO option descriptor
		 * in front
		 */
		u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear;

		buffer->flags = EFX_TX_BUF_OPTION;
		buffer->len = 0;
		buffer->unmap_len = 0;
		EFX_POPULATE_QWORD_5(buffer->option,
				     ESF_DZ_TX_DESC_IS_OPT, 1,
				     ESF_DZ_TX_OPTION_TYPE,
				     ESE_DZ_TX_OPTION_DESC_TSO,
				     ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
				     ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
				     ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
		++tx_queue->insert_count;

		/* We mapped the headers in tso_start().  Unmap them
		 * when the last segment is completed.
		 */
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
		buffer->dma_addr = st->header_dma_addr;
		buffer->len = st->header_len;
		if (is_last) {
			buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
			buffer->unmap_len = st->header_unmap_len;
			buffer->dma_offset = 0;
			/* Ensure we only unmap them once in case of a
			 * later DMA mapping error and rollback
			 */
			st->header_unmap_len = 0;
		} else {
			buffer->flags = EFX_TX_BUF_CONT;
			buffer->unmap_len = 0;
		}
		++tx_queue->insert_count;
	}

	st->seqnum += skb_shinfo(skb)->gso_size;

	/* Linux leaves suitable gaps in the IP ID space for us to fill. */
	++st->ipv4_id;

	++tx_queue->tso_packets;

	++tx_queue->tx_packets;

	return 0;
}


/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued.  In all cases @skb is consumed.  Return
 * %NETDEV_TX_OK.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int old_insert_count = tx_queue->insert_count;
	int frag_i, rc;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	rc = tso_start(&state, efx, tx_queue, skb);
	if (rc)
		goto mem_err;

	if (likely(state.in_len == 0)) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		/* Payload starts in the header area. */
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		tso_fill_packet_with_fragment(tx_queue, skb, &state);

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	efx_tx_maybe_stop_queue(tx_queue);

	/* Pass off to hardware */
	if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

		/* There could be packets left on the partner queue if those
		 * SKBs had skb->xmit_more set. If we do not push those they
		 * could be left for a long time and cause a netdev watchdog.
		 */
		if (txq2->xmit_more_available)
			efx_nic_push_buffers(txq2);

		efx_nic_push_buffers(tx_queue);
	} else {
		tx_queue->xmit_more_available = skb->xmit_more;
	}

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or DMA mapping error\n");
	dev_kfree_skb_any(skb);

	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
					 state.unmap_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
				       state.unmap_len, DMA_TO_DEVICE);
	}

	/* Free the header DMA mapping, if using option descriptors */
	if (state.header_unmap_len)
		dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
				 state.header_unmap_len, DMA_TO_DEVICE);

	efx_enqueue_unwind(tx_queue, old_insert_count);
	return NETDEV_TX_OK;
}