ionic_txrx.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"

static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	DEBUG_STATS_TXQ_POST(q, ring_dbell);

	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);

	DEBUG_STATS_RX_BUFF_CNT(q);
}

static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
	return netdev_get_tx_queue(q->lif->netdev, q->index);
}

static void ionic_rx_buf_reset(struct ionic_buf_info *buf_info)
{
	buf_info->page = NULL;
	buf_info->page_offset = 0;
	buf_info->dma_addr = 0;
}

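/* Allocate a fresh page for an Rx buffer and DMA-map it for device use */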
static int ionic_rx_page_alloc(struct ionic_queue *q,
			       struct ionic_buf_info *buf_info)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_rx_stats *stats;
	struct device *dev;

	dev = q->dev;
	stats = q_to_rx_stats(q);

	if (unlikely(!buf_info)) {
		net_err_ratelimited("%s: %s invalid buf_info in alloc\n",
				    netdev->name, q->name);
		return -EINVAL;
	}

	buf_info->page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
	if (unlikely(!buf_info->page)) {
		net_err_ratelimited("%s: %s page alloc failed\n",
				    netdev->name, q->name);
		stats->alloc_err++;
		return -ENOMEM;
	}
	buf_info->page_offset = 0;

	buf_info->dma_addr = dma_map_page(dev, buf_info->page, buf_info->page_offset,
					  IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
		__free_pages(buf_info->page, 0);
		ionic_rx_buf_reset(buf_info);
		net_err_ratelimited("%s: %s dma map failed\n",
				    netdev->name, q->name);
		stats->dma_map_err++;
		return -EIO;
	}

	return 0;
}

static void ionic_rx_page_free(struct ionic_queue *q,
			       struct ionic_buf_info *buf_info)
{
	struct net_device *netdev = q->lif->netdev;
	struct device *dev = q->dev;

	if (unlikely(!buf_info)) {
		net_err_ratelimited("%s: %s invalid buf_info in free\n",
				    netdev->name, q->name);
		return;
	}

	if (!buf_info->page)
		return;

	dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
	__free_pages(buf_info->page, 0);
	ionic_rx_buf_reset(buf_info);
}

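/* Try to reuse the unconsumed remainder of an Rx page; returns false
 * when the page must be unmapped and replaced instead.
 */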
static bool ionic_rx_buf_recycle(struct ionic_queue *q,
				 struct ionic_buf_info *buf_info, u32 used)
{
	u32 size;

	/* don't re-use pages allocated in low-mem condition */
	if (page_is_pfmemalloc(buf_info->page))
		return false;

	/* don't re-use buffers from non-local numa nodes */
	if (page_to_nid(buf_info->page) != numa_mem_id())
		return false;

	size = ALIGN(used, IONIC_PAGE_SPLIT_SZ);
	buf_info->page_offset += size;
	if (buf_info->page_offset >= IONIC_PAGE_SIZE)
		return false;

	get_page(buf_info->page);

	return true;
}

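/* Build a frag-based skb directly from the Rx buffer pages named in the
 * completion, recycling or unmapping each buffer as it is consumed.
 */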
static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
				      struct ionic_desc_info *desc_info,
				      struct ionic_rxq_comp *comp)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_buf_info *buf_info;
	struct ionic_rx_stats *stats;
	struct device *dev = q->dev;
	struct sk_buff *skb;
	unsigned int i;
	u16 frag_len;
	u16 len;

	stats = q_to_rx_stats(q);

	buf_info = &desc_info->bufs[0];
	len = le16_to_cpu(comp->len);

	prefetch(buf_info->page);

	skb = napi_get_frags(&q_to_qcq(q)->napi);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	i = comp->num_sg_elems + 1;
	do {
		if (unlikely(!buf_info->page)) {
			dev_kfree_skb(skb);
			return NULL;
		}

		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
		len -= frag_len;

		dma_sync_single_for_cpu(dev,
					buf_info->dma_addr + buf_info->page_offset,
					frag_len, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				buf_info->page, buf_info->page_offset, frag_len,
				IONIC_PAGE_SIZE);

		if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
			dma_unmap_page(dev, buf_info->dma_addr,
				       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
			ionic_rx_buf_reset(buf_info);
		}

		buf_info++;

		i--;
	} while (i > 0);

	return skb;
}

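/* Copy a small packet into a freshly allocated linear skb so the Rx
 * buffer page can stay mapped and be reused.
 */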
static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
					  struct ionic_desc_info *desc_info,
					  struct ionic_rxq_comp *comp)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_buf_info *buf_info;
	struct ionic_rx_stats *stats;
	struct device *dev = q->dev;
	struct sk_buff *skb;
	u16 len;

	stats = q_to_rx_stats(q);

	buf_info = &desc_info->bufs[0];
	len = le16_to_cpu(comp->len);

	skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	if (unlikely(!buf_info->page)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset,
				len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len);
	dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset,
				   len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, q->lif->netdev);

	return skb;
}

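/* Handle one Rx completion: build the skb and apply RSS hash, checksum,
 * VLAN and hardware timestamp results before passing it up through GRO.
 */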
static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct ionic_rx_stats *stats;
	struct ionic_rxq_comp *comp;
	struct sk_buff *skb;

	comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp);

	stats = q_to_rx_stats(q);

	if (comp->status) {
		stats->dropped++;
		return;
	}

	stats->pkts++;
	stats->bytes += le16_to_cpu(comp->len);

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		skb = ionic_rx_copybreak(q, desc_info, comp);
	else
		skb = ionic_rx_frags(q, desc_info, comp);

	if (unlikely(!skb)) {
		stats->dropped++;
		return;
	}

	skb_record_rx_queue(skb, q->index);

	if (likely(netdev->features & NETIF_F_RXHASH)) {
		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
		case IONIC_PKT_TYPE_IPV4:
		case IONIC_PKT_TYPE_IPV6:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L3);
			break;
		case IONIC_PKT_TYPE_IPV4_TCP:
		case IONIC_PKT_TYPE_IPV6_TCP:
		case IONIC_PKT_TYPE_IPV4_UDP:
		case IONIC_PKT_TYPE_IPV6_UDP:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L4);
			break;
		}
	}

	if (likely(netdev->features & NETIF_F_RXCSUM)) {
		if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
			stats->csum_complete++;
		}
	} else {
		stats->csum_none++;
	}

	if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
		stats->csum_error++;

	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(comp->vlan_tci));
		stats->vlan_stripped++;
	}

	if (unlikely(q->features & IONIC_RXQ_F_HWSTAMP)) {
		__le64 *cq_desc_hwstamp;
		u64 hwstamp;

		cq_desc_hwstamp =
			cq_info->cq_desc +
			qcq->cq.desc_size -
			sizeof(struct ionic_rxq_comp) -
			IONIC_HWSTAMP_CQ_NEGOFFSET;

		hwstamp = le64_to_cpu(*cq_desc_hwstamp);

		if (hwstamp != IONIC_HWSTAMP_INVALID) {
			skb_hwtstamps(skb)->hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
			stats->hwstamp_valid++;
		} else {
			stats->hwstamp_invalid++;
		}
	}

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		napi_gro_receive(&qcq->napi, skb);
	else
		napi_gro_frags(&qcq->napi);
}

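/* Service a single Rx completion entry; returns false when there is no
 * more completed work on this queue.
 */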
bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	struct ionic_rxq_comp *comp;

	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);

	if (!color_match(comp->pkt_type_color, cq->done_color))
		return false;

	/* check for empty queue */
	if (q->tail_idx == q->head_idx)
		return false;

	if (q->tail_idx != le16_to_cpu(comp->comp_index))
		return false;

	desc_info = &q->info[q->tail_idx];
	q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

	/* clean the related q entry, only one per qc completion */
	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);

	desc_info->cb = NULL;
	desc_info->cb_arg = NULL;

	return true;
}

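/* Refill the Rx ring: allocate pages where needed and post descriptors,
 * using SG elements when the frame size needs more than one buffer.
 */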
void ionic_rx_fill(struct ionic_queue *q)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_desc_info *desc_info;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *sg_elem;
	struct ionic_buf_info *buf_info;
	struct ionic_rxq_desc *desc;
	unsigned int remain_len;
	unsigned int frag_len;
	unsigned int nfrags;
	unsigned int i, j;
	unsigned int len;

	len = netdev->mtu + ETH_HLEN + VLAN_HLEN;

	for (i = ionic_q_space_avail(q); i; i--) {
		nfrags = 0;
		remain_len = len;
		desc_info = &q->info[q->head_idx];
		desc = desc_info->desc;
		buf_info = &desc_info->bufs[0];

		if (!buf_info->page) { /* alloc a new buffer? */
			if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
				desc->addr = 0;
				desc->len = 0;
				return;
			}
		}

		/* fill main descriptor - buf[0] */
		desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
		desc->len = cpu_to_le16(frag_len);
		remain_len -= frag_len;
		buf_info++;
		nfrags++;

		/* fill sg descriptors - buf[1..n] */
		sg_desc = desc_info->sg_desc;
		for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) {
			sg_elem = &sg_desc->elems[j];
			if (!buf_info->page) { /* alloc a new sg buffer? */
				if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
					sg_elem->addr = 0;
					sg_elem->len = 0;
					return;
				}
			}

			sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
			frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE - buf_info->page_offset);
			sg_elem->len = cpu_to_le16(frag_len);
			remain_len -= frag_len;
			buf_info++;
			nfrags++;
		}

		/* clear end sg element as a sentinel */
		if (j < q->max_sg_elems) {
			sg_elem = &sg_desc->elems[j];
			memset(sg_elem, 0, sizeof(*sg_elem));
		}

		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
		desc_info->nbufs = nfrags;

		ionic_rxq_post(q, false, ionic_rx_clean, NULL);
	}

	ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
			 q->dbval | q->head_idx);
}

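/* Free all posted Rx buffers and reset the ring indices */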
void ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	unsigned int i, j;

	for (i = 0; i < q->num_descs; i++) {
		desc_info = &q->info[i];
		for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
			buf_info = &desc_info->bufs[j];
			if (buf_info->page)
				ionic_rx_page_free(q, buf_info);
		}

		desc_info->nbufs = 0;
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}

	q->head_idx = 0;
	q->tail_idx = 0;
}

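/* Feed the queue's packet/byte counters to dynamic interrupt moderation */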
static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
{
	struct dim_sample dim_sample;
	struct ionic_lif *lif;
	unsigned int qi;
	u64 pkts, bytes;

	if (!qcq->intr.dim_coal_hw)
		return;

	lif = qcq->q.lif;
	qi = qcq->cq.bound_q->index;

	switch (napi_mode) {
	case IONIC_LIF_F_TX_DIM_INTR:
		pkts = lif->txqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes;
		break;
	case IONIC_LIF_F_RX_DIM_INTR:
		pkts = lif->rxqstats[qi].pkts;
		bytes = lif->rxqstats[qi].bytes;
		break;
	default:
		pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes;
		break;
	}

	dim_update_sample(qcq->cq.bound_intr->rearm_count,
			  pkts, bytes, &dim_sample);

	net_dim(&qcq->dim, dim_sample);
}

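/* NAPI poll handler for a Tx-only interrupt */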
int ionic_tx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_tx_service, NULL, NULL);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, work_done);

	return work_done;
}

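/* NAPI poll handler for an Rx-only interrupt */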
int ionic_rx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u16 rx_fill_threshold;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_rx_service, NULL, NULL);

	rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
				  cq->num_descs / IONIC_RX_FILL_DIV);
	if (work_done && ionic_q_space_avail(cq->bound_q) >= rx_fill_threshold)
		ionic_rx_fill(cq->bound_q);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, work_done);

	return work_done;
}

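/* NAPI poll handler for a shared Tx/Rx interrupt */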
int ionic_txrx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *rxcq = napi_to_cq(napi);
	unsigned int qi = rxcq->bound_q->index;
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	struct ionic_cq *txcq;
	u16 rx_fill_threshold;
	u32 rx_work_done = 0;
	u32 tx_work_done = 0;
	u32 flags = 0;

	lif = rxcq->bound_q->lif;
	idev = &lif->ionic->idev;
	txcq = &lif->txqcqs[qi]->cq;

	tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
					ionic_tx_service, NULL, NULL);

	rx_work_done = ionic_cq_service(rxcq, budget,
					ionic_rx_service, NULL, NULL);

	rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
				  rxcq->num_descs / IONIC_RX_FILL_DIV);
	if (rx_work_done && ionic_q_space_avail(rxcq->bound_q) >= rx_fill_threshold)
		ionic_rx_fill(rxcq->bound_q);

	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
		ionic_dim_update(qcq, 0);
		flags |= IONIC_INTR_CRED_UNMASK;
		rxcq->bound_intr->rearm_count++;
	}

	if (rx_work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
				   tx_work_done + rx_work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, rx_work_done);
	DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);

	return rx_work_done;
}

static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
				      void *data, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}

static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
				    const skb_frag_t *frag,
				    size_t offset, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;

	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
	}
	return dma_addr;
}

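/* DMA-map the skb head and each fragment, recording the mappings in the
 * descriptor's buf_info array; all mappings are unwound on failure.
 */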
static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
			    struct ionic_desc_info *desc_info)
{
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;
	unsigned int nfrags;
	skb_frag_t *frag;
	int frag_idx;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr)) {
		stats->dma_map_err++;
		return -EIO;
	}
	buf_info->dma_addr = dma_addr;
	buf_info->len = skb_headlen(skb);
	buf_info++;

	frag = skb_shinfo(skb)->frags;
	nfrags = skb_shinfo(skb)->nr_frags;
	for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
		dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
		if (dma_mapping_error(dev, dma_addr)) {
			stats->dma_map_err++;
			goto dma_fail;
		}
		buf_info->dma_addr = dma_addr;
		buf_info->len = skb_frag_size(frag);
		buf_info++;
	}

	desc_info->nbufs = 1 + nfrags;

	return 0;

dma_fail:
	/* unwind the frag mappings and the head mapping */
	while (frag_idx > 0) {
		frag_idx--;
		buf_info--;
		dma_unmap_page(dev, buf_info->dma_addr,
			       buf_info->len, DMA_TO_DEVICE);
	}
	dma_unmap_single(dev, buf_info->dma_addr, buf_info->len, DMA_TO_DEVICE);
	return -EIO;
}

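/* Unmap a completed Tx descriptor's buffers, report a Tx hardware
 * timestamp if one was requested, and release the skb.
 */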
static void ionic_tx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct sk_buff *skb = cb_arg;
	struct device *dev = q->dev;
	unsigned int i;
	u16 qi;

	if (desc_info->nbufs) {
		dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
				 buf_info->len, DMA_TO_DEVICE);
		buf_info++;
		for (i = 1; i < desc_info->nbufs; i++, buf_info++)
			dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
				       buf_info->len, DMA_TO_DEVICE);
	}

	if (!skb)
		return;

	qi = skb_get_queue_mapping(skb);

	if (unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) {
		if (cq_info) {
			struct skb_shared_hwtstamps hwts = {};
			__le64 *cq_desc_hwstamp;
			u64 hwstamp;

			cq_desc_hwstamp =
				cq_info->cq_desc +
				qcq->cq.desc_size -
				sizeof(struct ionic_txq_comp) -
				IONIC_HWSTAMP_CQ_NEGOFFSET;

			hwstamp = le64_to_cpu(*cq_desc_hwstamp);

			if (hwstamp != IONIC_HWSTAMP_INVALID) {
				hwts.hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);

				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
				skb_tstamp_tx(skb, &hwts);

				stats->hwstamp_valid++;
			} else {
				stats->hwstamp_invalid++;
			}
		}

	} else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
		netif_wake_subqueue(q->lif->netdev, qi);
		q->wake++;
	}

	desc_info->bytes = skb->len;
	stats->clean++;

	dev_consume_skb_any(skb);
}

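/* Service one Tx completion, which may retire several queue entries */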
bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	struct ionic_txq_comp *comp;
	int bytes = 0;
	int pkts = 0;
	u16 index;

	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);

	if (!color_match(comp->color, cq->done_color))
		return false;

	/* clean the related q entries, there could be
	 * several q entries completed for each cq completion
	 */
	do {
		desc_info = &q->info[q->tail_idx];
		desc_info->bytes = 0;
		index = q->tail_idx;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
		if (desc_info->cb_arg) {
			pkts++;
			bytes += desc_info->bytes;
		}
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	} while (index != le16_to_cpu(comp->comp_index));

	if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);

	return true;
}

void ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_cq_service(cq, cq->num_descs,
				     ionic_tx_service, NULL, NULL);
	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}

void ionic_tx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	int bytes = 0;
	int pkts = 0;

	/* walk the not completed tx entries, if any */
	while (q->head_idx != q->tail_idx) {
		desc_info = &q->info[q->tail_idx];
		desc_info->bytes = 0;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
		if (desc_info->cb_arg) {
			pkts++;
			bytes += desc_info->bytes;
		}
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}

	if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
}

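/* Preload the inner TCP checksum with an IP pseudo-header sum computed
 * with a zero length, so the hardware can finish each segment's checksum.
 */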
static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_v6_gso_csum_prep(skb);
	}

	return 0;
}

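/* Fill and post one TSO descriptor; only the first descriptor of the skb
 * carries the completion callback and skb pointer.
 */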
static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
			      struct sk_buff *skb,
			      dma_addr_t addr, u8 nsge, u16 len,
			      unsigned int hdrlen, unsigned int mss,
			      bool outer_csum,
			      u16 vlan_tci, bool has_vlan,
			      bool start, bool done)
{
	u8 flags = 0;
	u64 cmd;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->vlan_tci = cpu_to_le16(vlan_tci);
	desc->hdr_len = cpu_to_le16(hdrlen);
	desc->mss = cpu_to_le16(mss);

	if (start) {
		skb_tx_timestamp(skb);
		if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
			netdev_tx_sent_queue(q_to_ndq(q), skb->len);
		ionic_txq_post(q, false, ionic_tx_clean, skb);
	} else {
		ionic_txq_post(q, done, NULL, NULL);
	}
}

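/* Carve a GSO skb into one descriptor per segment, adding SG elements
 * when a segment spans more than one mapped fragment.
 */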
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int chunk_len;
	unsigned int frag_rem;
	unsigned int tso_rem;
	unsigned int seg_rem;
	dma_addr_t desc_addr;
	dma_addr_t frag_addr;
	unsigned int hdrlen;
	unsigned int len;
	unsigned int mss;
	bool start, done;
	bool outer_csum;
	bool has_vlan;
	u16 desc_len;
	u8 desc_nsge;
	u16 vlan_tci;
	bool encap;
	int err;

	desc_info = &q->info[q->head_idx];
	buf_info = desc_info->bufs;

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	len = skb->len;
	mss = skb_shinfo(skb)->gso_size;
	outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
		     (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero.  HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err)
		return err;

	if (encap)
		hdrlen = skb_inner_transport_header(skb) - skb->data +
			 inner_tcp_hdrlen(skb);
	else
		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);

	tso_rem = len;
	seg_rem = min(tso_rem, hdrlen + mss);

	frag_addr = 0;
	frag_rem = 0;

	start = true;

	while (tso_rem > 0) {
		desc = NULL;
		elem = NULL;
		desc_addr = 0;
		desc_len = 0;
		desc_nsge = 0;
		/* use fragments until we have enough to post a single descriptor */
		while (seg_rem > 0) {
			/* if the fragment is exhausted then move to the next one */
			if (frag_rem == 0) {
				/* grab the next fragment */
				frag_addr = buf_info->dma_addr;
				frag_rem = buf_info->len;
				buf_info++;
			}
			chunk_len = min(frag_rem, seg_rem);
			if (!desc) {
				/* fill main descriptor */
				desc = desc_info->txq_desc;
				elem = desc_info->txq_sg_desc->elems;
				desc_addr = frag_addr;
				desc_len = chunk_len;
			} else {
				/* fill sg descriptor */
				elem->addr = cpu_to_le64(frag_addr);
				elem->len = cpu_to_le16(chunk_len);
				elem++;
				desc_nsge++;
			}
			frag_addr += chunk_len;
			frag_rem -= chunk_len;
			tso_rem -= chunk_len;
			seg_rem -= chunk_len;
		}
		seg_rem = min(tso_rem, mss);
		done = (tso_rem == 0);
		/* post descriptor */
		ionic_tx_tso_post(q, desc, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss, outer_csum, vlan_tci, has_vlan,
				  start, done);
		start = false;
		/* Buffer information is stored with the first tso descriptor */
		desc_info = &q->info[q->head_idx];
		desc_info->nbufs = 0;
	}

	stats->pkts += DIV_ROUND_UP(len - hdrlen, mss);
	stats->bytes += len;
	stats->tso++;
	stats->tso_bytes = len;

	return 0;
}

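/* Build a CSUM_PARTIAL descriptor using the skb's checksum start/offset */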
static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
			      struct ionic_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = desc_info->txq_desc;
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
	desc->csum_offset = cpu_to_le16(skb->csum_offset);

	if (skb_csum_is_sctp(skb))
		stats->crc32_csum++;
	else
		stats->csum++;

	return 0;
}

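/* Build a descriptor with no checksum offload requested */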
static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
				 struct ionic_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = desc_info->txq_desc;
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = 0;
	desc->csum_offset = 0;

	stats->csum_none++;

	return 0;
}

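/* Copy the already-mapped fragment addresses and lengths into the
 * descriptor's SG element list.
 */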
static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
			      struct ionic_desc_info *desc_info)
{
	struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc;
	struct ionic_buf_info *buf_info = &desc_info->bufs[1];
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	unsigned int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) {
		elem->addr = cpu_to_le64(buf_info->dma_addr);
		elem->len = cpu_to_le16(buf_info->len);
	}

	stats->frags += skb_shinfo(skb)->nr_frags;

	return 0;
}

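/* Map and post a single non-TSO skb: one main descriptor plus SG frags */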
static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_desc_info *desc_info = &q->info[q->head_idx];
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int err;

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	/* set up the initial descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = ionic_tx_calc_csum(q, skb, desc_info);
	else
		err = ionic_tx_calc_no_csum(q, skb, desc_info);
	if (err)
		return err;

	/* add frags */
	err = ionic_tx_skb_frags(q, skb, desc_info);
	if (err)
		return err;

	skb_tx_timestamp(skb);
	stats->pkts++;
	stats->bytes += skb->len;

	if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_sent_queue(q_to_ndq(q), skb->len);
	ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);

	return 0;
}

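/* Return how many descriptors the skb needs, linearizing it first if it
 * has more fragments than the queue's SG list can describe.
 */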
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int ndescs;
	int err;

	/* Each desc is mss long max, so a descriptor for each gso_seg */
	if (skb_is_gso(skb))
		ndescs = skb_shinfo(skb)->gso_segs;
	else
		ndescs = 1;

	/* If non-TSO, just need 1 desc and nr_frags sg elems */
	if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems)
		return ndescs;

	/* Too many frags, so linearize */
	err = skb_linearize(skb);
	if (err)
		return err;

	stats->linearize++;

	return ndescs;
}

static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
{
	int stopped = 0;

	if (unlikely(!ionic_q_has_space(q, ndescs))) {
		netif_stop_subqueue(q->lif->netdev, q->index);
		q->stop++;
		stopped = 1;

		/* Might race with ionic_tx_clean, check again */
		smp_rmb();
		if (ionic_q_has_space(q, ndescs)) {
			netif_wake_subqueue(q->lif->netdev, q->index);
			stopped = 0;
		}
	}

	return stopped;
}

static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q = &lif->hwstamp_txq->q;
	int err, ndescs;

	/* Does not stop/start txq, because we post to a separate tx queue
	 * for timestamping, and if a packet can't be posted immediately to
	 * the timestamping queue, it is dropped.
	 */

	ndescs = ionic_tx_descs_needed(q, skb);
	if (unlikely(ndescs < 0))
		goto err_out_drop;

	if (unlikely(!ionic_q_has_space(q, ndescs)))
		goto err_out_drop;

	skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP;
	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	return NETDEV_TX_OK;

err_out_drop:
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

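/* ndo_start_xmit: choose the Tx queue, make sure there is descriptor
 * space, then hand the skb to the TSO or normal transmit path.
 */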
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int ndescs;
	int err;

	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		if (lif->hwstamp_txq && lif->phc->ts_config_tx_mode)
			return ionic_start_hwstamp_xmit(skb, netdev);

	if (unlikely(queue_index >= lif->nxqs))
		queue_index = 0;
	q = &lif->txqcqs[queue_index]->q;

	ndescs = ionic_tx_descs_needed(q, skb);
	if (ndescs < 0)
		goto err_out_drop;

	if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
		return NETDEV_TX_BUSY;

	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	/* Stop the queue if there aren't descriptors for the next packet.
	 * Since our SG lists per descriptor take care of most of the possible
	 * fragmentation, we don't need to have many descriptors available.
	 */
	ionic_maybe_stop_tx(q, 4);

	return NETDEV_TX_OK;

err_out_drop:
	q->stop++;
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}