// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>
#include "i40e.h"
#include "i40e_trace.h"
#include "i40e_prototype.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
 * i40e_fdir - Generate a Flow Director descriptor based on fdata
 * @tx_ring: Tx ring to send buffer on
 * @fdata: Flow director filter data
 * @add: Indicate if we are adding a rule or deleting one
 *
 **/
static void i40e_fdir(struct i40e_ring *tx_ring,
		      struct i40e_fdir_filter *fdata, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
		     (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
		      (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
		      (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	/* Use LAN VSI Id if not programmed by user */
	flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
		      ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= add ?
		     I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT :
		     I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
		     (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);

	dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
		     (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);

	if (fdata->cnt_index) {
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
			     ((u32)fdata->cnt_index <<
			      I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
}

#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
				    u8 *raw_packet, struct i40e_pf *pf,
				    bool add)
{
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_bi[i];
	i40e_fdir(tx_ring, fdir_data, add);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}

#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	udp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add)
		pf->fd_udp4_filter_cnt++;
	else
		pf->fd_udp4_filter_cnt--;

	return 0;
}

#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	tcp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add) {
		pf->fd_tcp4_filter_cnt++;
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
		set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
	} else {
		pf->fd_tcp4_filter_cnt--;
	}

	return 0;
}

#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct sctphdr *sctp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	sctp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	sctp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add)
		pf->fd_sctp4_filter_cnt++;
	else
		pf->fd_sctp4_filter_cnt--;

	return 0;
}

#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4;	i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip;
		ip->daddr = fd_data->dst_ip;
		ip->protocol = 0;

		if (fd_data->flex_filter) {
			u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
			__be16 pattern = fd_data->flex_word;
			u16 off = fd_data->flex_offset;

			*((__force __be16 *)(payload + off)) = pattern;
		}

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			/* The packet buffer wasn't added to the ring so we
			 * need to free it now.
			 */
			kfree(raw_packet);
			return -EOPNOTSUPP;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	if (add)
		pf->fd_ip4_filter_cnt++;
	else
		pf->fd_ip4_filter_cnt--;

	return 0;
}

/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: filter to add or delete
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		case IPPROTO_IP:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		default:
			/* We cannot support masking based on protocol */
			dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
				 input->ip4_proto);
			return -EINVAL;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
			 input->flow_type);
		return -EINVAL;
	}

	/* The buffer allocated here will normally be freed by
	 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
	 * completion. In the event of an error adding the buffer to the FDIR
	 * ring, it will immediately be freed. It may also be freed by
	 * i40e_clean_tx_ring() when closing the VSI.
	 */
	return ret;
}

/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @qword0_raw: qword0
 * @qword1: qword1 after le_to_cpu
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
				  u64 qword1, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	struct i40e_32b_rx_wb_qw0 *qw0;
	u32 fcnt_prog, fcnt_avail;
	u32 error;

	qw0 = (struct i40e_32b_rx_wb_qw0 *)&qword0_raw;
	error = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id);
		if (qw0->hi_dword.fd_id != 0 ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if (qw0->hi_dword.fd_id == 0 &&
		    test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
			/* These set_bit() calls aren't atomic with the
			 * test_bit() here, but worst case we potentially
			 * disable ATR and queue a flush right after SB
			 * support is re-enabled. That shouldn't cause an
			 * issue in practice
			 */
			set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
			set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
					      pf->state))
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 qw0->hi_dword.fd_id);
	}
}

/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring:      the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else if (ring_is_xdp(ring))
			xdp_return_frame(tx_buffer->xdpf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	if (ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
		i40e_xsk_clean_tx_ring(tx_ring);
	} else {
		/* ring already cleared, nothing to do */
		if (!tx_ring->tx_bi)
			return;

		/* Free all the Tx ring sk_buffs */
		for (i = 0; i < tx_ring->count; i++)
			i40e_unmap_and_free_tx_resource(tx_ring,
							&tx_ring->tx_bi[i]);
	}

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: use SW variables
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
	u32 head, tail;

	if (!in_sw) {
		head = i40e_get_head(ring);
		tail = readl(ring->tail);
	} else {
		head = ring->next_to_clean;
		tail = ring->next_to_use;
	}

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

/**
 * i40e_detect_recover_hung - Function to detect and recover hung_queues
 * @vsi:  pointer to vsi struct with tx queues
 *
 * VSI has netdev and netdev has TX queues. This function is to check each of
 * those TX queues if they are hung, trigger recovery by issuing SW interrupt.
 **/
void i40e_detect_recover_hung(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring = NULL;
	struct net_device *netdev;
	unsigned int i;
	int packets;

	if (!vsi)
		return;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	netdev = vsi->netdev;
	if (!netdev)
		return;

	if (!netif_carrier_ok(netdev))
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		tx_ring = vsi->tx_rings[i];
		if (tx_ring && tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt_ctr would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.packets & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
				i40e_force_wb(vsi, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to i40e_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt_ctr =
			    i40e_get_tx_pending(tx_ring, true) ? packets : -1;
		}
	}
}

/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
			      struct i40e_ring *tx_ring, int napi_budget)
{
	int i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = vsi->work_limit;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb/XDP data */
		if (ring_is_xdp(tx_ring))
			xdp_return_frame(tx_buf->xdpf);
		else
			napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			i40e_trace(clean_tx_irq_unmap,
				   tx_ring, tx_desc, tx_buf);

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	i40e_update_tx_stats(tx_ring, total_packets, total_bytes);
	i40e_arm_wb(tx_ring, vsi, budget);

	if (ring_is_xdp(tx_ring))
		return !!budget;

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		   !test_bit(__I40E_VSI_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
				  struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
		     val);
	} else {
		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
	q_vector->arm_wb_state = true;
}

/**
 * i40e_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector  on which to force writeback
 *
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
			/* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}

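/**
 * i40e_container_is_rx - returns true if the ring container is for Rx
 * @q_vector: the q_vector the ring container belongs to
 * @rc: the ring container to check
 **/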
static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
					struct i40e_ring_container *rc)
{
	return &q_vector->rx == rc;
}

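/**
 * i40e_itr_divisor - return the divisor used when scaling adaptive ITR
 * @q_vector: the q_vector the ITR value is being computed for
 *
 * The divisor is derived from the current link speed so that the same
 * byte counts translate into a sensible ITR value at each speed.
 **/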
static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
{
	unsigned int divisor;

	switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
		break;
	case I40E_LINK_SPEED_25GB:
	case I40E_LINK_SPEED_20GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
		break;
	default:
	case I40E_LINK_SPEED_10GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
		break;
	case I40E_LINK_SPEED_1GB:
	case I40E_LINK_SPEED_100MB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
		break;
	}

	return divisor;
}

/**
 * i40e_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void i40e_update_itr(struct i40e_q_vector *q_vector,
			    struct i40e_ring_container *rc)
{
	unsigned int avg_wire_size, packets, bytes, itr;
	unsigned long next_update = jiffies;

	/* If we don't have any rings just leave ourselves set for maximum
	 * possible latency so we take ourselves out of the equation.
	 */
	if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
		return;

	/* For Rx we want to push the delay up and default to low latency.
	 * for Tx we want to pull the delay down and default to high latency.
	 */
	itr = i40e_container_is_rx(q_vector, rc) ?
	      I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
	      I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;

	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	packets = rc->total_packets;
	bytes = rc->total_bytes;

	if (i40e_container_is_rx(q_vector, rc)) {
		/* If Rx there are 1 to 4 packets and bytes are less than
		 * 9000 assume insufficient data to use bulk rate limiting
		 * approach unless Tx is already in bulk rate limiting. We
		 * are likely latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
			itr = I40E_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
		 * bulk mode and we are receiving 4 or fewer packets just
		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
		 * that the Rx can relax.
		 */
		if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & I40E_ITR_MASK) ==
		     I40E_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
	}

	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * Between 4 and 56 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
		if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
			itr &= I40E_ITR_ADAPTIVE_LATENCY;
			itr += I40E_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}

	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= I40E_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If packet count is 128 or greater we are likely looking
		 * at a slight overrun of the delay we want. Try halving
		 * our delay to see if that will cut the number of packets
		 * in half per interrupt.
		 */
		itr /= 2;
		itr &= I40E_ITR_MASK;
		if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
			itr = I40E_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
	itr = I40E_ITR_ADAPTIVE_BULK;

adjust_by_size:
	/* If packet counts are 256 or greater we can assume we have a gross
	 * overestimation of what the rate should be. Instead of trying to fine
	 * tune it just use the formula below to try and dial in an exact value
	 * give the current packet size of the frame.
	 */
	avg_wire_size = bytes / packets;

	/* The following is a crude approximation of:
	 *  wmem_default / (size + overhead) = desired_pkts_per_int
	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
	 *
	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
	 * formula down to
	 *
	 *  (170 * (size + 24)) / (size + 640) = ITR
	 *
	 * We first do some math on the packet size and then finally bitshift
	 * by 8 after rounding up. We also have to account for PCIe link speed
	 * difference as ITR scales based on this.
	 */
	if (avg_wire_size <= 60) {
		/* Start at 250k ints/sec */
		avg_wire_size = 4096;
	} else if (avg_wire_size <= 380) {
		/* 250K ints/sec to 60K ints/sec */
		avg_wire_size *= 40;
		avg_wire_size += 1696;
	} else if (avg_wire_size <= 1084) {
		/* 60K ints/sec to 36K ints/sec */
		avg_wire_size *= 15;
		avg_wire_size += 11452;
	} else if (avg_wire_size <= 1980) {
		/* 36K ints/sec to 30K ints/sec */
		avg_wire_size *= 5;
		avg_wire_size += 22420;
	} else {
		/* plateau at a limit of 30K ints/sec */
		avg_wire_size = 32256;
	}

	/* If we are in low latency mode halve our delay which doubles the
	 * rate to somewhere between 100K to 16K ints/sec
	 */
	if (itr & I40E_ITR_ADAPTIVE_LATENCY)
		avg_wire_size /= 2;

	/* Resultant value is 256 times larger than it needs to be. This
	 * gives us room to adjust the value as needed to either increase
	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
	 *
	 * Use addition as we have already recorded the new latency flag
	 * for the ITR value.
	 */
	itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
	       I40E_ITR_ADAPTIVE_MIN_INC;

	if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
		itr &= I40E_ITR_ADAPTIVE_LATENCY;
		itr += I40E_ITR_ADAPTIVE_MAX_USECS;
	}

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_packets = 0;
}

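/**
 * i40e_rx_bi - return the rx_buffer at the given index in the ring
 * @rx_ring: rx descriptor ring the buffer belongs to
 * @idx: index of the buffer to return
 **/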
static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
{
	return &rx_ring->rx_bi[idx];
}

/**
 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
			       struct i40e_rx_buffer *old_buff)
{
	struct i40e_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = i40e_rx_bi(rx_ring, nta);

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->dma		= old_buff->dma;
	new_buff->page		= old_buff->page;
	new_buff->page_offset	= old_buff->page_offset;
	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;

	rx_ring->rx_stats.page_reuse_count++;

	/* clear contents of buffer_info */
	old_buff->page = NULL;
}

/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @qword0_raw: qword0
 * @qword1: qword1 representing status_error_len in CPU ordering
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 **/
void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
				   u64 qword1)
{
	u8 id;

	id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
		  I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id);
}

/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	u64_stats_init(&tx_ring->syncp);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt_ctr = -1;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}

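/**
 * i40e_alloc_rx_bi - allocate the software rx_buffer array for a ring
 * @rx_ring: rx descriptor ring to allocate the array for
 *
 * Returns 0 on success, -ENOMEM if the allocation fails.
 **/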
int i40e_alloc_rx_bi(struct i40e_ring *rx_ring)
{
	unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count;

	rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL);
	return rx_ring->rx_bi ? 0 : -ENOMEM;
}

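/**
 * i40e_clear_rx_bi - zero out the software rx_buffer array of a ring
 * @rx_ring: rx descriptor ring whose buffer array is being cleared
 **/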
static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
{
	memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
}

/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	if (rx_ring->xsk_umem) {
		i40e_xsk_clean_rx_ring(rx_ring);
		goto skip_free;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i);

		if (!rx_bi->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_bi->dma,
					      rx_bi->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
				     i40e_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     I40E_RX_DMA_ATTR);

		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);

		rx_bi->page = NULL;
		rx_bi->page_offset = 0;
	}

skip_free:
	if (rx_ring->xsk_umem)
		i40e_clear_rx_bi_zc(rx_ring);
	else
		i40e_clear_rx_bi(rx_ring);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == I40E_VSI_MAIN)
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		return -ENOMEM;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	/* XDP RX-queue info only needed for RX rings exposed to XDP */
	if (rx_ring->vsi->type == I40E_VSI_MAIN) {
		err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				       rx_ring->queue_index);
		if (err < 0)
			return err;
	}

	rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;

	return 0;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */
static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
}

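
/**
 * i40e_rx_frame_truesize - truesize to report for a received frame
 * @rx_ring: ring the buffer was received on
 * @size: size of the received frame
 *
 * With pages smaller than 8K the buffer always spans half of the Rx page;
 * otherwise the truesize is the frame size plus any headroom and
 * skb_shared_info overhead used when the skb is built in place.
 **/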
static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
					   unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = i40e_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = i40e_rx_offset(rx_ring) ?
		SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring)) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

/**
 * i40e_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 **/
static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
				   struct i40e_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 i40e_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 I40E_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, i40e_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = i40e_rx_offset(rx_ring);
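	/* take a large initial reference so the hot path can recycle the
	 * page by adjusting pagecnt_bias instead of touching the atomic
	 * page refcount for every received frame
	 */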
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * i40e_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/
bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = i40e_rx_bi(rx_ring, ntu);

	do {
		if (!i40e_alloc_mapped_page(rx_ring, bi))
			goto no_buffers;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = i40e_rx_bi(rx_ring, 0);
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.qword1.status_error_len = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return false;

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}

/**
 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
				    struct sk_buff *skb,
				    union i40e_rx_desc *rx_desc)
{
	struct i40e_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;
	u8 ptype;
	u64 qword;

	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
	rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
		   I40E_RXD_QW1_ERROR_SHIFT;
	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
		    I40E_RXD_QW1_STATUS_SHIFT;
	decoded = decode_rx_desc_ptype(ptype);

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);

	if (ipv4 &&
	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
		return;

	/* If there is an outer header present that might contain a checksum
	 * we need to bump the checksum level by 1 to reflect the fact that
	 * we are indicating we validated the inner checksum.
	 */
	if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case I40E_RX_PTYPE_INNER_PROT_TCP:
	case I40E_RX_PTYPE_INNER_PROT_UDP:
	case I40E_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		fallthrough;
	default:
		break;
	}

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}

/**
 * i40e_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline int i40e_ptype_to_htype(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}

/**
 * i40e_rx_hash - set the hash value in the skb
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: Rx packet type
 **/
static inline void i40e_rx_hash(struct i40e_ring *ring,
				union i40e_rx_desc *rx_desc,
				struct sk_buff *skb,
				u8 rx_ptype)
{
	u32 hash;
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
	}
}

/**
 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 **/
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
			     union i40e_rx_desc *rx_desc, struct sk_buff *skb)
{
	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			I40E_RXD_QW1_STATUS_SHIFT;
	u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
	u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
		   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
	u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
		      I40E_RXD_QW1_PTYPE_SHIFT;

	if (unlikely(tsynvalid))
		i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);

	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);

	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
		u16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(vlan_tag));
	}

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

/**
 * i40e_cleanup_headers - Correct empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being fixed
 * @rx_desc: pointer to the EOP Rx descriptor
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
				 union i40e_rx_desc *rx_desc)

{
	/* XDP packets use error pointer so abort at this point */
	if (IS_ERR(skb))
		return true;

	/* ERR_MASK will only have valid bits if EOP set, and
	 * what we are doing here is actually checking
	 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
	 * the error field
	 */
	if (unlikely(i40e_test_staterr(rx_desc,
				       BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
		dev_kfree_skb_any(skb);
		return true;
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * i40e_page_is_reusable - check if any reuse is possible
 * @page: page struct to check
 *
 * A page is not reusable if it was allocated under low memory
 * conditions, or it's not in the same NUMA node as this CPU.
 */
static inline bool i40e_page_is_reusable(struct page *page)
{
	return (page_to_nid(page) == numa_mem_id()) &&
		!page_is_pfmemalloc(page);
}

/**
 * i40e_can_reuse_rx_page - Determine if this page can be reused by
 * the adapter for another receive
 *
 * @rx_buffer: buffer containing the page
 *
 * If page is reusable, rx_buffer->page_offset is adjusted to point to
 * an unused region in the page.
 *
 * For small pages, @truesize will be a constant value, half the size
 * of the memory at page.  We'll attempt to alternate between high and
 * low halves of the page, with one half ready for use by the hardware
 * and the other half being consumed by the stack.  We use the page
 * ref count to determine whether the stack has finished consuming the
 * portion of this page that was passed up with a previous packet.  If
 * the page ref count is >1, we'll assume the "other" half page is
 * still busy, and this page cannot be reused.
 *
 * For larger pages, @truesize will be the actual space used by the
 * received packet (adjusted upward to an even multiple of the cache
 * line size).  This will advance through the page by the amount
 * actually consumed by the received packets while there is still
 * space for a buffer.  Each region of larger pages will be used at
 * most once, after which the page will not be reused.
 *
 * In either case, if the page is reusable its refcount is increased.
 **/
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* Is any reuse possible? */
	if (unlikely(!i40e_page_is_reusable(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_count(page) - pagecnt_bias) > 1))
		return false;
#else
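/* largest page_offset at which another 2K receive buffer (plus the
 * skb_shared_info overhead) still fits in the page
 */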
#define I40E_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
	if (rx_buffer->page_offset > I40E_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * It will just attach the page as a frag to the skb.
 *
 * The function will then update the page offset.
 **/
static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
			     struct i40e_rx_buffer *rx_buffer,
			     struct sk_buff *skb,
			     unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
#endif

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}

/**
 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
 * @rx_ring: rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
						 const unsigned int size)
{
	struct i40e_rx_buffer *rx_buffer;

	rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

/**
 * i40e_construct_skb - Allocate skb and populate it
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb.  It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
					  struct i40e_rx_buffer *rx_buffer,
					  struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
	prefetch(xdp->data + L1_CACHE_BYTES);
#endif
	/* Note, we get here by enabling legacy-rx via:
	 *
	 *    ethtool --set-priv-flags <dev> legacy-rx on
	 *
	 * In this mode, we currently get 0 extra XDP headroom as
	 * opposed to having legacy-rx off, where we process XDP
	 * packets going to stack via i40e_build_skb(). The latter
	 * provides us currently with 192 bytes of headroom.
	 *
	 * For i40e_construct_skb() mode it means that the
	 * xdp->data_meta will always point to xdp->data, since
	 * the helper cannot expand the head. Should this ever
	 * change in future for legacy-rx mode on, then lets also
	 * add xdp->data_meta handling here.
	 */

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       I40E_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > I40E_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data,
					  I40E_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data,
	       ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				rx_buffer->page_offset + headlen,
				size, truesize);

		/* buffer is used by skb, update page_offset */
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		/* buffer is unused, reset bias back to rx_buffer */
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

/**
 * i40e_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buffer: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */
static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
				      struct i40e_rx_buffer *rx_buffer,
				      struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points exactly as xdp->data, otherwise we
	 * likely have a consumer accessing first few bytes of meta
	 * data, and then actual data.
	 */
	prefetch(xdp->data_meta);
#if L1_CACHE_BYTES < 128
	prefetch(xdp->data_meta + L1_CACHE_BYTES);
#endif
	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* buffer is used by skb, update page_offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}

/**
 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: rx buffer to pull data from
 *
 * This function will clean up the contents of the rx_buffer.  It will
 * either recycle the buffer or unmap it and free the associated resources.
 */
static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
			       struct i40e_rx_buffer *rx_buffer)
{
	if (i40e_can_reuse_rx_page(rx_buffer)) {
		/* hand second half of page back to the ring */
		i40e_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
				     i40e_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
		/* clear contents of buffer_info */
		rx_buffer->page = NULL;
	}
}

/**
 * i40e_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it counts the buffer as
 * a non-EOP descriptor and returns true to indicate that more fragments
 * of the current packet follow.
 **/
static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
			    union i40e_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(I40E_RX_DESC(rx_ring, ntc));

	/* if we are the last buffer then there is nothing else to do */
#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
	if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
		return false;

	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

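/* forward declaration; i40e_xmit_xdp_ring() is defined with the rest of
 * the Tx path later in this file
 */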
static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
			      struct i40e_ring *xdp_ring);

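/**
 * i40e_xmit_xdp_tx_ring - submit an XDP buffer for transmit on the XDP ring
 * @xdp: XDP buffer to transmit
 * @xdp_ring: XDP Tx ring to place the buffer on
 *
 * Converts the buffer to an xdp_frame and hands it to i40e_xmit_xdp_ring().
 * Returns I40E_XDP_CONSUMED if the conversion fails.
 **/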
int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!xdpf))
		return I40E_XDP_CONSUMED;

	return i40e_xmit_xdp_ring(xdpf, xdp_ring);
}

/**
 * i40e_run_xdp - run an XDP program
 * @rx_ring: Rx ring being processed
 * @xdp: XDP buffer containing the frame
 **/
static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
				    struct xdp_buff *xdp)
{
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	struct bpf_prog *xdp_prog;
	u32 act;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);

	if (!xdp_prog)
		goto xdp_out;

	prefetchw(xdp->data_hard_start); /* xdp_frame write */

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		result = I40E_XDP_CONSUMED;
		break;
	}
xdp_out:
	rcu_read_unlock();
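	/* hand the verdict back encoded in an error pointer; a PASS result
	 * (0) becomes NULL so the caller falls through to the normal skb
	 * build path, other verdicts are recovered with -PTR_ERR()
	 */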
	return ERR_PTR(-result);
}

/**
 * i40e_rx_buffer_flip - adjust rx_buffer to point to an unused region
 * @rx_ring: Rx ring
 * @rx_buffer: Rx buffer to adjust
 * @size: Size of adjustment
 **/
static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
				struct i40e_rx_buffer *rx_buffer,
				unsigned int size)
{
	unsigned int truesize = i40e_rx_frame_truesize(rx_ring, size);

#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}

/**
 * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register
 * @xdp_ring: XDP Tx ring
 *
 * This function updates the XDP Tx ring tail register.
 **/
void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
{
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();
	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
}

/**
 * i40e_update_rx_stats - Update Rx ring statistics
 * @rx_ring: rx descriptor ring
 * @total_rx_bytes: number of bytes received
 * @total_rx_packets: number of packets received
 *
 * This function updates the Rx ring statistics.
 **/
void i40e_update_rx_stats(struct i40e_ring *rx_ring,
			  unsigned int total_rx_bytes,
			  unsigned int total_rx_packets)
{
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
}

/**
 * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
 * @rx_ring: Rx ring
 * @xdp_res: Result of the receive batch
 *
 * This function bumps XDP Tx tail and/or flush redirect map, and
 * should be called when a batch of packets has been processed in the
 * napi loop.
 **/
void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
{
	if (xdp_res & I40E_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_res & I40E_XDP_TX) {
		struct i40e_ring *xdp_ring =
			rx_ring->vsi->xdp_rings[rx_ring->queue_index];

		i40e_xdp_ring_update_tail(xdp_ring);
	}
}

/**
 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 **/
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct sk_buff *skb = rx_ring->skb;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	unsigned int xdp_xmit = 0;
	bool failure = false;
	struct xdp_buff xdp;

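	/* with 4K pages the buffer truesize is constant, so frame_sz can be
	 * set once up front; for larger pages it is recomputed per packet
	 * further down in the loop
	 */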
#if (PAGE_SIZE < 8192)
	xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
#endif
	xdp.rxq = &rx_ring->xdp_rxq;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		struct i40e_rx_buffer *rx_buffer;
		union i40e_rx_desc *rx_desc;
		unsigned int size;
		u64 qword;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			failure = failure ||
				  i40e_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then the length will be non-zero
		 */
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring,
						      rx_desc->raw.qword[0],
						      qword);
			rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
			i40e_inc_ntc(rx_ring);
			i40e_reuse_rx_page(rx_ring, rx_buffer);
			cleaned_count++;
			continue;
		}

		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		if (!size)
			break;

		i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
		rx_buffer = i40e_get_rx_buffer(rx_ring, size);

		/* retrieve a buffer from the ring */
		if (!skb) {
			xdp.data = page_address(rx_buffer->page) +
				   rx_buffer->page_offset;
			xdp.data_meta = xdp.data;
			xdp.data_hard_start = xdp.data -
					      i40e_rx_offset(rx_ring);
			xdp.data_end = xdp.data + size;
#if (PAGE_SIZE > 4096)
			/* At larger PAGE_SIZE, frame_sz depend on len size */
			xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
#endif
			skb = i40e_run_xdp(rx_ring, &xdp);
		}

		if (IS_ERR(skb)) {
			unsigned int xdp_res = -PTR_ERR(skb);

			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
				xdp_xmit |= xdp_res;
				i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
			} else {
				rx_buffer->pagecnt_bias++;
			}
			total_rx_bytes += size;
			total_rx_packets++;
		} else if (skb) {
			i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
		} else if (ring_uses_build_skb(rx_ring)) {
			skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
		} else {
			skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
		}

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			rx_buffer->pagecnt_bias++;
			break;
		}

		i40e_put_rx_buffer(rx_ring, rx_buffer);
		cleaned_count++;

		if (i40e_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		i40e_process_skb_fields(rx_ring, rx_desc, skb);

		i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
		skb = NULL;

		/* update budget accounting */
		total_rx_packets++;
	}

	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	rx_ring->skb = skb;

	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_packets;
}

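/**
 * i40e_buildreg_itr - build a value for writing to the dynamic ITR register
 * @type: ITR index (Rx, Tx or no ITR) to encode into the register value
 * @itr: interval in usecs, masked and scaled to the 2 usec register units
 **/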
static inline u32 i40e_buildreg_itr(const int type, u16 itr)
{
	u32 val;

	/* We don't bother with setting the CLEARPBA bit as the data sheet
	 * points out doing so is "meaningless since it was already
	 * auto-cleared". The auto-clearing happens when the interrupt is
	 * asserted.
	 *
	 * Hardware errata 28 also indicates that writing to a
	 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
	 * an event in the PBA anyway so we need to rely on the automask
	 * to hold pending events for us until the interrupt is re-enabled
	 *
	 * The itr value is reported in microseconds, and the register
	 * value is recorded in 2 microsecond units. For this reason we
	 * only need to shift by the interval shift - 1 instead of the
	 * full value.
	 */
	itr &= I40E_ITR_MASK;

	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
	      (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
	      (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));

	return val;
}

/* a small macro to shorten up some long lines */
#define INTREG I40E_PFINT_DYN_CTLN

/* The act of updating the ITR will cause it to immediately trigger. In order
 * to prevent this from throwing off adaptive update statistics we defer the
 * update so that it can only happen so often. So after either Tx or Rx are
 * updated we make the adaptive scheme wait until either the ITR completely
 * expires via the next_update expiration or we have been through at least
 * 3 interrupts.
 */
#define ITR_COUNTDOWN_START 3

/**
 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
 * @vsi: the VSI we care about
 * @q_vector: q_vector for which itr is being updated and interrupt enabled
 *
 **/
static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
					  struct i40e_q_vector *q_vector)
{
	struct i40e_hw *hw = &vsi->back->hw;
	u32 intval;

	/* If we don't have MSIX, then we only need to re-enable icr0 */
	if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
		i40e_irq_dynamic_enable_icr0(vsi->back);
		return;
	}

	/* These will do nothing if dynamic updates are not enabled */
	i40e_update_itr(q_vector, &q_vector->tx);
	i40e_update_itr(q_vector, &q_vector->rx);

	/* This block of logic allows us to get away with only updating
	 * one ITR value with each interrupt. The idea is to perform a
	 * pseudo-lazy update with the following criteria.
	 *
	 * 1. Rx is given higher priority than Tx if both are in same state
	 * 2. If we must reduce an ITR that is given highest priority.
	 * 3. We then give priority to increasing ITR based on amount.
	 */
	if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
		/* Rx ITR needs to be reduced, this is highest priority */
		intval = i40e_buildreg_itr(I40E_RX_ITR,
					   q_vector->rx.target_itr);
		q_vector->rx.current_itr = q_vector->rx.target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
		   ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
		    (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
		/* Tx ITR needs to be reduced, this is second priority
		 * Tx ITR needs to be increased more than Rx, fourth priority
		 */
		intval = i40e_buildreg_itr(I40E_TX_ITR,
					   q_vector->tx.target_itr);
		q_vector->tx.current_itr = q_vector->tx.target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
		/* Rx ITR needs to be increased, third priority */
		intval = i40e_buildreg_itr(I40E_RX_ITR,
					   q_vector->rx.target_itr);
		q_vector->rx.current_itr = q_vector->rx.target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else {
		/* No ITR update, lowest priority */
		intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
		if (q_vector->itr_countdown)
			q_vector->itr_countdown--;
	}

	if (!test_bit(__I40E_VSI_DOWN, vsi->state))
		wr32(hw, INTREG(q_vector->reg_idx), intval);
}

/**
 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/
int i40e_napi_poll(struct napi_struct *napi, int budget)
{
	struct i40e_q_vector *q_vector =
			       container_of(napi, struct i40e_q_vector, napi);
	struct i40e_vsi *vsi = q_vector->vsi;
	struct i40e_ring *ring;
	bool clean_complete = true;
	bool arm_wb = false;
	int budget_per_ring;
	int work_done = 0;

	if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
		napi_complete(napi);
		return 0;
	}

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	i40e_for_each_ring(ring, q_vector->tx) {
		bool wd = ring->xsk_umem ?
			  i40e_clean_xdp_tx_irq(vsi, ring, budget) :
			  i40e_clean_tx_irq(vsi, ring, budget);

		if (!wd) {
			clean_complete = false;
			continue;
		}
		arm_wb |= ring->arm_wb;
		ring->arm_wb = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (budget <= 0)
		goto tx_only;

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling early.
	 */
	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);

	i40e_for_each_ring(ring, q_vector->rx) {
		int cleaned = ring->xsk_umem ?
			      i40e_clean_rx_irq_zc(ring, budget_per_ring) :
			      i40e_clean_rx_irq(ring, budget_per_ring);

		work_done += cleaned;
		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete) {
		int cpu_id = smp_processor_id();

		/* It is possible that the interrupt affinity has changed but,
		 * if the cpu is pegged at 100%, polling will never exit while
		 * traffic continues and the interrupt will be stuck on this
		 * cpu.  We check to make sure affinity is correct before we
		 * continue to poll, otherwise we must stop polling so the
		 * interrupt can move to the correct cpu.
		 */
		if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
			/* Tell napi that we are done polling */
			napi_complete_done(napi, work_done);

			/* Force an interrupt */
			i40e_force_wb(vsi, q_vector);

			/* Return budget-1 so that polling stops */
			return budget - 1;
		}
tx_only:
		if (arm_wb) {
			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
			i40e_enable_wb_on_itr(vsi, q_vector);
		}
		return budget;
	}

	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
		q_vector->arm_wb_state = false;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		i40e_update_enable_itr(vsi, q_vector);

	return min(work_done, budget - 1);
}

/**
 * i40e_atr - Add a Flow Director ATR filter
 * @tx_ring:  ring to add programming descriptor to
 * @skb:      send buffer
 * @tx_flags: send tx flags
 **/
static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	union {
		unsigned char *network;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	struct tcphdr *th;
	unsigned int hlen;
	u32 flex_ptype, dtype_cmd;
	int l4_proto;
	u16 i;

	/* make sure ATR is enabled */
	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
		return;

	/* if sampling is disabled do nothing */
	if (!tx_ring->atr_sample_rate)
		return;

	/* Currently only IPv4/IPv6 with TCP is supported */
	if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
		return;

	/* snag network header to get L4 type and address */
	hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
		      skb_inner_network_header(skb) : skb_network_header(skb);

	/* Note: tx_flags gets modified to reflect inner protocols in
	 * tx_enable_csum function if encap is enabled.
	 */
	if (tx_flags & I40E_TX_FLAGS_IPV4) {
		/* access ihl as u8 to avoid unaligned access on ia64 */
		hlen = (hdr.network[0] & 0x0F) << 2;
		l4_proto = hdr.ipv4->protocol;
	} else {
		/* find the start of the innermost ipv6 header */
		unsigned int inner_hlen = hdr.network - skb->data;
		unsigned int h_offset = inner_hlen;

		/* this function updates h_offset to the end of the header */
		l4_proto =
		  ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
		/* hlen will contain our best estimate of the tcp header */
		hlen = h_offset - inner_hlen;
	}

	if (l4_proto != IPPROTO_TCP)
		return;

	th = (struct tcphdr *)(hdr.network + hlen);

	/* Due to lack of space, no more new filters can be programmed */
	if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
		return;
	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
		/* HW ATR eviction will take care of removing filters on FIN
		 * and RST packets.
		 */
		if (th->fin || th->rst)
			return;
	}

	tx_ring->atr_count++;

	/* sample on all syn/fin/rst packets or once every atr sample rate */
	if (!th->fin &&
	    !th->syn &&
	    !th->rst &&
	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
		return;

	tx_ring->atr_count = 0;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
		      I40E_TXD_FLTR_QW0_QINDEX_MASK;
	flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= (th->fin || th->rst) ?
		     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
		     I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
	if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
		dtype_cmd |=
			((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	else
		dtype_cmd |=
			((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;

	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
		dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(0);
}

/**
 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 * @flags:   the tx flags to be set
 *
 * Checks the skb and sets up the corresponding generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 *
 * Returns an error code to indicate the frame should be dropped upon error,
 * and otherwise returns 0 to indicate the flags have been set properly.
 **/
static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
					     struct i40e_ring *tx_ring,
					     u32 *flags)
{
	__be16 protocol = skb->protocol;
	u32  tx_flags = 0;

	if (protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off by the user the
		 * stack sets the protocol to 8021q so that the driver
		 * can take any steps required to support the SW only
		 * VLAN handling.  In our case the driver doesn't need
		 * to take any further steps so just set the protocol
		 * to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		goto out;
	}

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN, check the next protocol and store the tag */
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			return -EINVAL;

		protocol = vhdr->h_vlan_encapsulated_proto;
		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
	}

	if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
		goto out;

	/* Insert 802.1p priority into VLAN header */
	if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
	    (skb->priority != TC_PRIO_CONTROL)) {
		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
		tx_flags |= (skb->priority & 0x7) <<
				I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
			struct vlan_ethhdr *vhdr;
			int rc;

			rc = skb_cow_head(skb, 0);
			if (rc < 0)
				return rc;
			vhdr = (struct vlan_ethhdr *)skb->data;
			vhdr->h_vlan_TCI = htons(tx_flags >>
						 I40E_TX_FLAGS_VLAN_SHIFT);
		} else {
			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
		}
	}

out:
	*flags = tx_flags;
	return 0;
}

/**
 * i40e_tso - set up the tso context descriptor
 * @first:    pointer to first Tx buffer for xmit
 * @hdr_len:  ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no TSO can happen, 1 if tso is going, or error
 **/
static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
		    u64 *cd_type_cmd_tso_mss)
{
	struct sk_buff *skb = first->skb;
	u64 cd_cmd, cd_tso_len, cd_mss;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	u16 gso_segs, gso_size;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_IPXIP4 |
					 SKB_GSO_IPXIP6 |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
			l4.udp->len = 0;

			/* determine offset of outer transport header */
			l4_offset = l4.hdr - skb->data;

			/* remove payload length from outer checksum */
			paylen = skb->len - l4_offset;
			csum_replace_by_diff(&l4.udp->check,
					     (__force __wsum)htonl(paylen));
		}

		/* reset pointers to inner headers */
		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* initialize inner IP header fields */
		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			ip.v4->check = 0;
		} else {
			ip.v6->payload_len = 0;
		}
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen));
		/* compute length of segmentation header */
		*hdr_len = sizeof(*l4.udp) + l4_offset;
	} else {
		csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
		/* compute length of segmentation header */
		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
	}

	/* pull values out of skb_shinfo */
	gso_size = skb_shinfo(skb)->gso_size;
	gso_segs = skb_shinfo(skb)->gso_segs;

	/* update GSO size and bytecount with header size */
	first->gso_segs = gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* find the field values */
	cd_cmd = I40E_TX_CTX_DESC_TSO;
	cd_tso_len = skb->len - *hdr_len;
	cd_mss = gso_size;
	*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
				(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
				(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
	return 1;
}

/**
 * i40e_tsyn - set up the tsyn context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @tx_flags: the collected send information
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
 **/
static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, u64 *cd_type_cmd_tso_mss)
{
	struct i40e_pf *pf;

	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return 0;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (tx_flags & I40E_TX_FLAGS_TSO)
		return 0;

	/* only timestamp the outbound packet if the user has requested it and
	 * we are not already transmitting a packet to be timestamped
	 */
	pf = i40e_netdev_to_pf(tx_ring->netdev);
	if (!(pf->flags & I40E_FLAG_PTP))
		return 0;

	if (pf->ptp_tx &&
	    !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		pf->ptp_tx_start = jiffies;
		pf->ptp_tx_skb = skb_get(skb);
	} else {
		pf->tx_hwtstamp_skipped++;
		return 0;
	}

	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
				I40E_TXD_CTX_QW1_CMD_SHIFT;

	return 1;
}

/**
 * i40e_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send buffer
 * @tx_flags: pointer to Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx descriptor ring
 * @cd_tunneling: ptr to context desc bits
 **/
static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
			       u32 *td_cmd, u32 *td_offset,
			       struct i40e_ring *tx_ring,
			       u32 *cd_tunneling)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	__be16 frag_off;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute outer L2 header size */
	offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;

	if (skb->encapsulation) {
		u32 tunnel = 0;
		/* define outer network header type */
		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
			tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
				  I40E_TX_CTX_EXT_IP_IPV4 :
				  I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;

			l4_proto = ip.v4->protocol;
		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
			tunnel |= I40E_TX_CTX_EXT_IP_IPV6;

			exthdr = ip.hdr + sizeof(*ip.v6);
			l4_proto = ip.v6->nexthdr;
			if (l4.hdr != exthdr)
				ipv6_skip_exthdr(skb, exthdr - skb->data,
						 &l4_proto, &frag_off);
		}

		/* define outer transport */
		switch (l4_proto) {
		case IPPROTO_UDP:
			tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
			break;
		case IPPROTO_GRE:
			tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
			break;
		case IPPROTO_IPIP:
		case IPPROTO_IPV6:
			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
			l4.hdr = skb_inner_network_header(skb);
			break;
		default:
			if (*tx_flags & I40E_TX_FLAGS_TSO)
				return -1;

			skb_checksum_help(skb);
			return 0;
		}

		/* compute outer L3 header size */
		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
			  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;

		/* switch IP header pointer from outer to inner header */
		ip.hdr = skb_inner_network_header(skb);

		/* compute tunnel header size */
		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
			  I40E_TXD_CTX_QW0_NATLEN_SHIFT;

		/* indicate if we need to offload outer UDP header */
		if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
			tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;

		/* record tunnel offload values */
		*cd_tunneling |= tunnel;

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);
		l4_proto = 0;

		/* reset type as we transition from outer to inner headers */
		*tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
		if (ip.v4->version == 4)
			*tx_flags |= I40E_TX_FLAGS_IPV4;
		if (ip.v6->version == 6)
			*tx_flags |= I40E_TX_FLAGS_IPV6;
	}

	/* Enable IP checksum offloads */
	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
		       I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
		       I40E_TX_DESC_CMD_IIPT_IPV4;
	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
		cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;

		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto, &frag_off);
	}

	/* compute inner L3 header size */
	offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
		offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
		offset |= (sizeof(struct sctphdr) >> 2) <<
			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
		offset |= (sizeof(struct udphdr) >> 2) <<
			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		if (*tx_flags & I40E_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

	*td_cmd |= cmd;
	*td_offset |= offset;

	return 1;
}

/**
 * i40e_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring:  ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/
static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
			       const u64 cd_type_cmd_tso_mss,
			       const u32 cd_tunneling, const u32 cd_l2tag2)
{
	struct i40e_tx_context_desc *context_desc;
	int i = tx_ring->next_to_use;

	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
	    !cd_tunneling && !cd_l2tag2)
		return;

	/* grab the next descriptor */
	context_desc = I40E_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* cpu_to_le32 and assign to struct fields */
	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
	context_desc->rsvd = cpu_to_le16(0);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}

/**
 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

/**
 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb:      send buffer
 *
 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 **/
bool __i40e_chk_linearize(struct sk_buff *skb)
{
	const skb_frag_t *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1.  We
	 * use this as the worst case scenerio in which the frag ahead
	 * of us only provides one byte which is why we are limited to 6
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
		int stale_size = skb_frag_size(stale);

		sum += skb_frag_size(frag++);

		/* The stale fragment may present us with a smaller
		 * descriptor than the actual fragment size. To account
		 * for that we need to remove all the data on the front and
		 * figure out what the remainder would be in the last
		 * descriptor associated with the fragment.
		 */
		if (stale_size > I40E_MAX_DATA_PER_TXD) {
			int align_pad = -(skb_frag_off(stale)) &
					(I40E_MAX_READ_REQ_SIZE - 1);

			sum -= align_pad;
			stale_size -= align_pad;

			do {
				sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
				stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
			} while (stale_size > I40E_MAX_DATA_PER_TXD);
		}

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= stale_size;
3329 3330
	}

3331
	return false;
3332 3333
}

/**
 * i40e_tx_map - Build the Tx descriptor
 * @tx_ring:  ring to send buffer on
 * @skb:      send buffer
 * @first:    first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len:  size of the packet header
 * @td_cmd:   the command field in the descriptor
 * @td_offset: offset for checksum or crc
 *
 * Returns 0 on success, -1 on failure to DMA
 **/
static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			      struct i40e_tx_buffer *first, u32 tx_flags,
			      const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	skb_frag_t *frag;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	u16 i = tx_ring->next_to_use;
	u32 td_tag = 0;
	dma_addr_t dma;
	u16 desc_count = 1;

	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
			 I40E_TX_FLAGS_VLAN_SHIFT;
	}

	first->tx_flags = tx_flags;

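	/* map the linear (head) portion of the skb first; the page
	 * fragments are mapped one at a time inside the loop below
	 */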
	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_bi = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		/* align size to end of page */
		max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
		tx_desc->buffer_addr = cpu_to_le64(dma);

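		/* a mapped buffer larger than I40E_MAX_DATA_PER_TXD is split
		 * across several descriptors; max_data is sized so that each
		 * chunk ends on an I40E_MAX_READ_REQ_SIZE boundary
		 */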
		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset,
					   max_data, td_tag);

			tx_desc++;
			i++;
			desc_count++;

			if (i == tx_ring->count) {
				tx_desc = I40E_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;
		desc_count++;

		if (i == tx_ring->count) {
			tx_desc = I40E_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_bi = &tx_ring->tx_bi[i];
	}

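	/* account the bytes of this packet with byte queue limits (BQL) */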
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* write last descriptor with EOP bit */
	td_cmd |= I40E_TX_DESC_CMD_EOP;

	/* We OR these values together to check both against 4 (WB_STRIDE)
	 * below. This is safe since we don't re-use desc_count afterwards.
	 */
	desc_count |= ++tx_ring->packet_stride;

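	/* Requesting a descriptor write-back (RS) for every packet would be
	 * costly, so it is only done when this packet used WB_STRIDE or more
	 * descriptors, or once WB_STRIDE packets have been queued since the
	 * last request.
	 */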
	if (desc_count >= WB_STRIDE) {
		/* write last descriptor with RS bit set */
		td_cmd |= I40E_TX_DESC_CMD_RS;
		tx_ring->packet_stride = 0;
	}

	tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag);

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
		writel(i, tx_ring->tail);
	}

	return 0;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_bi map */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;

	return -1;
}

/**
 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
 * @xdpf: data to transmit
 * @xdp_ring: XDP Tx ring
 **/
static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
			      struct i40e_ring *xdp_ring)
{
	u16 i = xdp_ring->next_to_use;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	void *data = xdpf->data;
	u32 size = xdpf->len;
	dma_addr_t dma;

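	/* this path always uses exactly one data descriptor per frame, so a
	 * single free slot in the ring is enough to continue
	 */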
	if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
		xdp_ring->tx_stats.tx_busy++;
		return I40E_XDP_CONSUMED;
	}
	dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(xdp_ring->dev, dma))
		return I40E_XDP_CONSUMED;

	tx_bi = &xdp_ring->tx_bi[i];
	tx_bi->bytecount = size;
	tx_bi->gso_segs = 1;
	tx_bi->xdpf = xdpf;

	/* record length, and DMA address */
	dma_unmap_len_set(tx_bi, len, size);
	dma_unmap_addr_set(tx_bi, dma, dma);

	tx_desc = I40E_TX_DESC(xdp_ring, i);
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
						  | I40E_TXD_CMD,
						  0, size, 0);

	/* Make certain all of the status bits have been updated
	 * before next_to_watch is written.
	 */
	smp_wmb();

	i++;
	if (i == xdp_ring->count)
		i = 0;

	tx_bi->next_to_watch = tx_desc;
	xdp_ring->next_to_use = i;

	return I40E_XDP_TX;
}

/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tso, count;
	int tsyn;

	/* prefetch the data, we'll need it later */
	prefetch(skb->data);

	i40e_trace(xmit_frame_ring, skb, tx_ring);

	count = i40e_xmit_descriptor_count(skb);
	if (i40e_chk_linearize(skb, count)) {
		if (__skb_linearize(skb)) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		count = i40e_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	/* prepare the xmit flags */
	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* obtain protocol of skb */
	protocol = vlan_get_protocol(skb);

	/* setup IPv4/IPv6 offloads */
	if (protocol == htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= I40E_TX_FLAGS_IPV6;

	tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;

	/* Always offload the checksum, since it's in the data descriptor */
	tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
				  tx_ring, &cd_tunneling);
	if (tso < 0)
		goto out_drop;

	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);

	if (tsyn)
		tx_flags |= I40E_TX_FLAGS_TSYN;

	/* always enable CRC insertion offload */
	td_cmd |= I40E_TX_DESC_CMD_ICRC;

	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	/* Add Flow Director ATR if it's enabled.
	 *
	 * NOTE: this must always be directly before the data descriptor.
	 */
	i40e_atr(tx_ring, skb, tx_flags);

	if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
			td_cmd, td_offset))
		goto cleanup_tx_tstamp;

	return NETDEV_TX_OK;

out_drop:
	i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;
cleanup_tx_tstamp:
	if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
		struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);

		dev_kfree_skb_any(pf->ptp_tx_skb);
		pf->ptp_tx_skb = NULL;
		clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
	}

	return NETDEV_TX_OK;
}

/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb:    send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return i40e_xmit_frame_ring(skb, tx_ring);
}

/**
 * i40e_xdp_xmit - Implements ndo_xdp_xmit
 * @dev: netdev
 * @n: number of XDP frames to transmit
 * @frames: array of XDP frames to transmit
 * @flags: XDP_XMIT_* flags
 *
 * Returns number of frames successfully sent. Frames that fail are
 * freed via the XDP return API.
 *
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (caller must handle freeing frames).
 **/
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *xdp_ring;
	int drops = 0;
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

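	/* XDP Tx rings are mapped one per CPU, so the index of the executing
	 * CPU selects the ring (validated against num_queue_pairs above)
	 */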
	xdp_ring = vsi->xdp_rings[queue_index];

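	/* queue each frame on the selected ring; any frame that cannot be
	 * queued is handed back through the XDP return API and counted as
	 * a drop
	 */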
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
		if (err != I40E_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		i40e_xdp_ring_update_tail(xdp_ring);

	return n - drops;
}