/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/bvec.h>
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/socket.h>
#include <linux/refcount.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <net/flow_dissector.h>
#include <linux/splice.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
#include <net/flow.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_common.h>
#endif

/* The interface for checksum offload between the stack and networking drivers
 * is as follows...
 *
 * A. IP checksum related features
 *
 * Drivers advertise checksum offload capabilities in the features of a device.
 * From the stack's point of view these are capabilities offered by the driver.
 * A driver typically only advertises features that it is capable of offloading
 * to its device.
 *
 * The checksum related features are:
 *
 *	NETIF_F_HW_CSUM	- The driver (or its device) is able to compute one
 *			  IP (one's complement) checksum for any combination
 *			  of protocols or protocol layering. The checksum is
 *			  computed and set in a packet per the CHECKSUM_PARTIAL
 *			  interface (see below).
 *
 *	NETIF_F_IP_CSUM - Driver (device) is only able to checksum plain
 *			  TCP or UDP packets over IPv4. These are specifically
 *			  unencapsulated packets of the form IPv4|TCP or
 *			  IPv4|UDP where the Protocol field in the IPv4 header
 *			  is TCP or UDP. The IPv4 header may contain IP options.
 *			  This feature cannot be set in features for a device
 *			  with NETIF_F_HW_CSUM also set. This feature is being
 *			  DEPRECATED (see below).
 *
 *	NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
 *			  TCP or UDP packets over IPv6. These are specifically
 *			  unencapsulated packets of the form IPv6|TCP or
 *			  IPv6|UDP where the Next Header field in the IPv6
 *			  header is either TCP or UDP. IPv6 extension headers
 *			  are not supported with this feature. This feature
 *			  cannot be set in features for a device with
 *			  NETIF_F_HW_CSUM also set. This feature is being
 *			  DEPRECATED (see below).
 *
 *	NETIF_F_RXCSUM - Driver (device) performs receive checksum offload.
 *			 This flag is only used to disable the RX checksum
 *			 feature for a device. The stack will accept receive
 *			 checksum indication in packets received on a device
 *			 regardless of whether NETIF_F_RXCSUM is set.
 *
 * B. Checksumming of received packets by device. Indication of checksum
 *    verification is set in skb->ip_summed. Possible values are:
 *
 * CHECKSUM_NONE:
 *
 *   Device did not checksum this packet, e.g. due to lack of capabilities.
 *   The packet contains the full (though not verified) checksum in the packet
 *   itself but not in skb->csum. Thus, skb->csum is undefined in this case.
 *
 * CHECKSUM_UNNECESSARY:
 *
 *   The hardware you're dealing with doesn't calculate the full checksum
 *   (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
 *   for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
 *   if their checksums are okay. skb->csum is still undefined in this case
 *   though. A driver or device must never modify the checksum field in the
 *   packet even if the checksum is verified.
 *
 *   CHECKSUM_UNNECESSARY is applicable to the following protocols:
 *     TCP: IPv6 and IPv4.
 *     UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
 *       zero UDP checksum for either IPv4 or IPv6; the networking stack
 *       may perform further validation in this case.
 *     GRE: only if the checksum is present in the header.
 *     SCTP: indicates the CRC in the SCTP header has been validated.
 *     FCOE: indicates the CRC in the FC frame has been validated.
 *
 *   skb->csum_level indicates the number of consecutive checksums found in
 *   the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
 *   For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
 *   and a device is able to verify the checksums for UDP (possibly zero),
 *   GRE (checksum flag is set) and TCP, skb->csum_level would be set to
 *   two. If the device were only able to verify the UDP checksum and not
 *   GRE, either because it doesn't support GRE checksum or because the GRE
 *   checksum is bad, skb->csum_level would be set to zero (TCP checksum is
 *   not considered in this case).
 *
 * CHECKSUM_COMPLETE:
 *
 *   This is the most generic way. The device supplies the checksum of the
 *   _whole_ packet as seen by netif_rx() and fills in skb->csum. This means
 *   the hardware doesn't need to parse L3/L4 headers to implement this.
 *
 *   Notes:
 *   - Even if a device supports only some protocols, but is able to produce
 *     skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
 *   - CHECKSUM_COMPLETE is not applicable to SCTP and FCoE protocols.
 *
 * CHECKSUM_PARTIAL:
 *
 *   A checksum is set up to be offloaded to a device as described in the
 *   output description for CHECKSUM_PARTIAL. This may occur on a packet
 *   received directly from another Linux OS, e.g., a virtualized Linux kernel
 *   on the same host, or it may be set in the input path in GRO or remote
 *   checksum offload. For the purposes of checksum verification, the checksum
 *   referred to by skb->csum_start + skb->csum_offset and any preceding
 *   checksums in the packet are considered verified. Any checksums in the
 *   packet that are after the checksum being offloaded are not considered to
 *   be verified.
 *
 * C. Checksumming on transmit for non-GSO. The stack requests checksum offload
 *    in the skb->ip_summed for a packet. Values are:
 *
 * CHECKSUM_PARTIAL:
 *
 *   The driver is required to checksum the packet as seen by hard_start_xmit()
 *   from skb->csum_start up to the end, and to record/write the checksum at
 *   offset skb->csum_start + skb->csum_offset. A driver may verify that the
 *   csum_start and csum_offset values are valid values given the length and
 *   offset of the packet, but it should not attempt to validate that the
 *   checksum refers to a legitimate transport layer checksum -- it is the
 *   purview of the stack to validate that csum_start and csum_offset are set
 *   correctly.
 *
 *   When the stack requests checksum offload for a packet, the driver MUST
 *   ensure that the checksum is set correctly. A driver can either offload the
 *   checksum calculation to the device, or call skb_checksum_help (in the case
 *   that the device does not support offload for a particular checksum).
 *
 *   NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of
 *   NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate
 *   checksum offload capability.
 *   skb_csum_hwoffload_help() can be called to resolve CHECKSUM_PARTIAL based
 *   on network device checksumming capabilities: if a packet does not match
 *   them, skb_checksum_help or skb_crc32c_help (depending on the value of
 *   csum_not_inet, see item D.) is called to resolve the checksum.
 *
 * CHECKSUM_NONE:
 *
 *   The skb was already checksummed by the protocol, or a checksum is not
 *   required.
 *
 * CHECKSUM_UNNECESSARY:
 *
 *   This has the same meaning as CHECKSUM_NONE for checksum offload on
 *   output.
 *
 * CHECKSUM_COMPLETE:
 *
 *   Not used in checksum output. If a driver observes a packet with this value
 *   set in the skbuff, it should treat the packet as if CHECKSUM_NONE were set.
 *
 * D. Non-IP checksum (CRC) offloads
 *
 *   NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
 *     offloading the SCTP CRC in a packet. To perform this offload the stack
 *     will set csum_start and csum_offset accordingly, set ip_summed to
 *     CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in
 *     the skbuff that the CHECKSUM_PARTIAL refers to CRC32c.
 *     A driver that supports both IP checksum offload and SCTP CRC32c offload
 *     must verify which offload is configured for a packet by testing the
 *     value of skb->csum_not_inet; skb_crc32c_csum_help is provided to resolve
 *     CHECKSUM_PARTIAL on skbs where csum_not_inet is set to 1.
 *
 *   NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
 *     offloading the FCOE CRC in a packet. To perform this offload the stack
 *     will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
 *     accordingly. Note that there is no indication in the skbuff that the
 *     CHECKSUM_PARTIAL refers to an FCOE checksum, so a driver that supports
 *     both IP checksum offload and FCOE CRC offload must verify which offload
 *     is configured for a packet, presumably by inspecting packet headers.
 *
 * E. Checksumming on output with GSO.
 *
 * In the case of a GSO packet (skb_is_gso(skb) is true), checksum offload
 * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the
 * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as
 * part of the GSO operation is implied. If a checksum is being offloaded
 * with GSO then ip_summed is CHECKSUM_PARTIAL, and both csum_start and
 * csum_offset are set to refer to the outermost checksum being offloaded
 * (two offloaded checksums are possible with UDP encapsulation).
 */

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3
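
/* Example (illustrative sketch, not part of this header's API): the usual
 * shape of a driver's transmit path resolving CHECKSUM_PARTIAL in software
 * when its device cannot offload the checksum for this particular packet.
 * example_hw_can_csum() is a hypothetical capability test;
 * skb_checksum_help() is declared elsewhere in this file:
 *
 *	static int example_tx_csum(struct example_priv *priv,
 *				   struct sk_buff *skb)
 *	{
 *		if (skb->ip_summed != CHECKSUM_PARTIAL)
 *			return 0;		// no offload was requested
 *		if (example_hw_can_csum(priv, skb))
 *			return 0;		// hardware writes the checksum
 *		return skb_checksum_help(skb);	// fall back to software
 *	}
 */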

/* Maximum value in skb->csum_level */
#define SKB_MAX_CSUM_LEVEL	3

#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

struct net_device;
struct scatterlist;
struct pipe_inode_info;
struct iov_iter;
struct napi_struct;
struct bpf_prog;
union bpf_attr;
struct skb_ext;

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
	enum {
		BRNF_PROTO_UNCHANGED,
		BRNF_PROTO_8021Q,
		BRNF_PROTO_PPPOE
	} orig_proto:8;
	u8			pkt_otherhost:1;
	u8			in_prerouting:1;
	u8			bridged_dnat:1;
	__u16			frag_max_size;
	struct net_device	*physindev;

	/* always valid & non-NULL from FORWARD on, for physdev match */
	struct net_device	*physoutdev;
	union {
		/* prerouting: detect dnat in orig/reply direction */
		__be32          ipv4_daddr;
		struct in6_addr ipv6_daddr;

		/* after prerouting + nat detected: store original source
		 * mac since neigh resolution overwrites it, only used while
		 * skb is out in neigh layer.
		 */
		char neigh_header[8];
	};
};
#endif

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
/* Chain in tc_skb_ext will be used to share the tc chain with
 * ovs recirc_id. It will be set to the current chain by tc
 * and read by ovs into recirc_id.
 */
struct tc_skb_ext {
	__u32 chain;
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
extern int sysctl_max_skb_frags;

/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
 * segment using its current segmentation instead.
 */
#define GSO_BY_FRAGS	0xFFFF

typedef struct bio_vec skb_frag_t;

/**
 * skb_frag_size() - Returns the size of a skb fragment
 * @frag: skb fragment
 */
static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->bv_len;
}

/**
 * skb_frag_size_set() - Sets the size of a skb fragment
 * @frag: skb fragment
 * @size: size of fragment
 */
static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->bv_len = size;
}

/**
 * skb_frag_size_add() - Increments the size of a skb fragment by @delta
 * @frag: skb fragment
 * @delta: value to add
 */
static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->bv_len += delta;
}

/**
 * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta
 * @frag: skb fragment
 * @delta: value to subtract
 */
static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->bv_len -= delta;
}

/**
 * skb_frag_must_loop - Test if %p is a high memory page
 * @p: fragment's page
 */
static inline bool skb_frag_must_loop(struct page *p)
{
#if defined(CONFIG_HIGHMEM)
	if (PageHighMem(p))
		return true;
#endif
	return false;
}

/**
 *	skb_frag_foreach_page - loop over pages in a fragment
 *
 *	@f:		skb frag to operate on
 *	@f_off:		offset from start of f->bv_page
 *	@f_len:		length from f_off to loop over
 *	@p:		(temp var) current page
 *	@p_off:		(temp var) offset from start of current page,
 *	                           non-zero only on first page.
 *	@p_len:		(temp var) length in current page,
 *				   < PAGE_SIZE only on first and last page.
 *	@copied:	(temp var) length so far, excluding current p_len.
 *
 *	A fragment can hold a compound page, in which case per-page
 *	operations, notably kmap_atomic, must be called for each
 *	regular page.
 */
#define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied)	\
	for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT),		\
	     p_off = (f_off) & (PAGE_SIZE - 1),				\
	     p_len = skb_frag_must_loop(p) ?				\
	     min_t(u32, f_len, PAGE_SIZE - p_off) : f_len,		\
	     copied = 0;						\
	     copied < f_len;						\
	     copied += p_len, p++, p_off = 0,				\
	     p_len = min_t(u32, f_len - copied, PAGE_SIZE))		\

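/* Example (illustrative sketch): copying out of a fragment that may live in
 * high memory or span a compound page, mapping one regular page at a time.
 * skb_frag_page() and skb_frag_off() are defined further down in this file;
 * dst is assumed to point at a large-enough buffer:
 *
 *	struct page *p;
 *	u32 p_off, p_len, copied;
 *	u8 *vaddr;
 *
 *	skb_frag_foreach_page(frag, skb_frag_off(frag), skb_frag_size(frag),
 *			      p, p_off, p_len, copied) {
 *		vaddr = kmap_atomic(p);
 *		memcpy(dst + copied, vaddr + p_off, p_len);
 *		kunmap_atomic(vaddr);
 *	}
 */
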
#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp when queueing packet to NIC */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 3,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 4,

	/* This indicates at least one fragment might be overwritten
	 * (as in vmsplice(), sendfile() ...)
	 * If we need to compute a TX checksum, we'll need to copy
	 * all frags to avoid possible bad checksum
	 */
	SKBTX_SHARED_FRAG = 1 << 5,

	/* generate software time stamp when entering packet scheduling */
	SKBTX_SCHED_TSTAMP = 1 << 6,
};

#define SKBTX_ZEROCOPY_FRAG	(SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG)
#define SKBTX_ANY_SW_TSTAMP	(SKBTX_SW_TSTAMP    | \
				 SKBTX_SCHED_TSTAMP)
#define SKBTX_ANY_TSTAMP	(SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)

/*
 * The callback notifies userspace to release buffers when skb DMA is done in
 * the lower device; the skb's last reference should be 0 when calling this.
 * The zerocopy_success argument is true if zero copy transmit occurred,
 * false on data copy or out of memory error caused by data copy attempt.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
	union {
		struct {
			unsigned long desc;
			void *ctx;
		};
		struct {
			u32 id;
			u16 len;
			u16 zerocopy:1;
			u32 bytelen;
		};
	};
	refcount_t refcnt;

	struct mmpin {
		struct user_struct *user;
		unsigned int num_pg;
	} mmp;
};

#define skb_uarg(SKB)	((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))

int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);

struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
					struct ubuf_info *uarg);

static inline void sock_zerocopy_get(struct ubuf_info *uarg)
{
	refcount_inc(&uarg->refcnt);
}

void sock_zerocopy_put(struct ubuf_info *uarg);
void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);

void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);

int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg);

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	__u8		__unused;
	__u8		meta_len;
	__u8		nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	unsigned int	gso_type;
	u32		tskey;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void *		destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)

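/* Example (illustrative sketch): decoding the two halves of dataref.
 * skb_header_cloned(), later in this file, performs this kind of
 * arithmetic:
 *
 *	int dataref = atomic_read(&skb_shinfo(skb)->dataref);
 *	int payload_refs = dataref >> SKB_DATAREF_SHIFT;
 *	int total_refs   = dataref & SKB_DATAREF_MASK;
 *	// (total_refs - payload_refs) users may still write to the header
 */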

enum {
	SKB_FCLONE_UNAVAILABLE,	/* skb has no fclone (from head_cache) */
	SKB_FCLONE_ORIG,	/* orig skb (from fclone_cache) */
	SKB_FCLONE_CLONE,	/* companion fclone skb (from fclone_cache) */
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 1,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 2,

	SKB_GSO_TCP_FIXEDID = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,

	SKB_GSO_GRE = 1 << 6,

	SKB_GSO_GRE_CSUM = 1 << 7,

	SKB_GSO_IPXIP4 = 1 << 8,

	SKB_GSO_IPXIP6 = 1 << 9,

	SKB_GSO_UDP_TUNNEL = 1 << 10,

	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,

	SKB_GSO_PARTIAL = 1 << 12,

	SKB_GSO_TUNNEL_REMCSUM = 1 << 13,

	SKB_GSO_SCTP = 1 << 14,

	SKB_GSO_ESP = 1 << 15,

	SKB_GSO_UDP = 1 << 16,

	SKB_GSO_UDP_L4 = 1 << 17,

	SKB_GSO_FRAGLIST = 1 << 18,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@tstamp: Time we arrived/left
 *	@skb_mstamp_ns: (aka @tstamp) earliest departure time; start point
 *		for retransmit timer
 *	@rbnode: RB tree node, alternative to next/prev for netem/tcp
 *	@list: queue head
 *	@sk: Socket we are owned by
 *	@ip_defrag_offset: (aka @sk) alternate use of @sk, used in
 *		fragmentation management
 *	@dev: Device we arrived on/are leaving by
 *	@dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@priority: Packet queueing priority
 *	@ignore_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@ip_summed: Driver fed us an IP checksum
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ipvs_property: skbuff is owned by ipvs
 *	@inner_protocol_type: whether the inner protocol is
 *		ENCAP_TYPE_ETHER or ENCAP_TYPE_IPPROTO
 *	@remcsum_offload: remote checksum offload is enabled
 *	@offload_fwd_mark: Packet was L2-forwarded in hardware
 *	@offload_l3_fwd_mark: Packet was L3-forwarded in hardware
 *	@tc_skip_classify: do not classify packet. set by IFB device
 *	@tc_at_ingress: used within tc_classify to distinguish in/egress
 *	@redirected: packet was redirected by packet classifier
 *	@from_ingress: packet was redirected from the ingress path
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@protocol: Packet protocol from driver
 *	@destructor: Destruct function
 *	@tcp_tsorted_anchor: list structure for TCP (tp->tsorted_sent_queue)
 *	@_nfct: Associated connection, if any (with nfctinfo bits)
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@skb_iif: ifindex of device we arrived on
 *	@tc_index: Traffic control index
 *	@hash: the packet hash
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@head_frag: skb was allocated from page fragments,
 *		not allocated by kmalloc() or vmalloc().
 *	@pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
 *	@active_extensions: active extensions (skb_ext_id types)
 *	@ndisc_nodetype: router type (from link layer)
 *	@ooo_okay: allow the mapping of a socket to a queue to be changed
 *	@l4_hash: indicate hash is a canonical 4-tuple hash over transport
 *		ports.
 *	@sw_hash: indicates hash was computed in software stack
 *	@wifi_acked_valid: wifi_acked was set
 *	@wifi_acked: whether frame was acked on wifi or not
 *	@no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
 *	@encapsulation: indicates the inner headers in the skbuff are valid
 *	@encap_hdr_csum: software checksum is needed
 *	@csum_valid: checksum is already valid
 *	@csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL
 *	@csum_complete_sw: checksum was completed by software
 *	@csum_level: indicates the number of consecutive checksums found in
 *		the packet minus one that have been verified as
 *		CHECKSUM_UNNECESSARY (max 3)
 *	@dst_pending_confirm: need to confirm neighbour
 *	@decrypted: Decrypted SKB
 *	@napi_id: id of the NAPI struct this skb came from
 *	@sender_cpu: (aka @napi_id) source CPU in XPS
 *	@secmark: security marking
 *	@mark: Generic packet mark
 *	@reserved_tailroom: (aka @mark) number of bytes of free space available
 *		at the tail of an sk_buff
 *	@vlan_present: VLAN tag is present
 *	@vlan_proto: vlan encapsulation protocol
 *	@vlan_tci: vlan tag control information
 *	@inner_protocol: Protocol (encapsulation)
 *	@inner_ipproto: (aka @inner_protocol) stores ipproto when
 *		skb->inner_protocol_type == ENCAP_TYPE_IPPROTO;
 *	@inner_transport_header: Inner transport layer header (encapsulation)
 *	@inner_network_header: Network layer header (encapsulation)
 *	@inner_mac_header: Link layer header (encapsulation)
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
 *	@extensions: allocated extensions, valid if active_extensions is nonzero
 */

struct sk_buff {
	union {
		struct {
			/* These two members must be first. */
			struct sk_buff		*next;
			struct sk_buff		*prev;

			union {
				struct net_device	*dev;
				/* Some protocols might use this space to store information,
				 * while device pointer would be NULL.
				 * UDP receive path is one user.
				 */
				unsigned long		dev_scratch;
			};
		};
		struct rb_node		rbnode; /* used in netem, ip4 defrag, and tcp stack */
		struct list_head	list;
	};

	union {
		struct sock		*sk;
		int			ip_defrag_offset;
	};

	union {
		ktime_t		tstamp;
		u64		skb_mstamp_ns; /* earliest departure time */
	};
	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	union {
		struct {
			unsigned long	_skb_refdst;
			void		(*destructor)(struct sk_buff *skb);
		};
		struct list_head	tcp_tsorted_anchor;
	};

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	unsigned long		 _nfct;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;

	/* Following fields are _not_ copied in __copy_skb_header()
	 * Note that queue_mapping is here mostly to fill a hole.
	 */
	__u16			queue_mapping;

/* if you move cloned around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define CLONED_MASK	(1 << 7)
#else
#define CLONED_MASK	1
#endif
#define CLONED_OFFSET()		offsetof(struct sk_buff, __cloned_offset)

	/* private: */
	__u8			__cloned_offset[0];
	/* public: */
	__u8			cloned:1,
				nohdr:1,
				fclone:2,
				peeked:1,
				head_frag:1,
				pfmemalloc:1;
#ifdef CONFIG_SKB_EXTENSIONS
	__u8			active_extensions;
#endif
	/* fields enclosed in headers_start/headers_end are copied
	 * using a single memcpy() in __copy_skb_header()
	 */
	/* private: */
	__u32			headers_start[0];
	/* public: */

/* if you move pkt_type around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_TYPE_MAX	(7 << 5)
#else
#define PKT_TYPE_MAX	7
#endif
#define PKT_TYPE_OFFSET()	offsetof(struct sk_buff, __pkt_type_offset)

	/* private: */
	__u8			__pkt_type_offset[0];
	/* public: */
	__u8			pkt_type:3;
	__u8			ignore_df:1;
	__u8			nf_trace:1;
	__u8			ip_summed:2;
	__u8			ooo_okay:1;

	__u8			l4_hash:1;
	__u8			sw_hash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;
	__u8			no_fcs:1;
	/* Indicates the inner headers are valid in the skbuff. */
	__u8			encapsulation:1;
	__u8			encap_hdr_csum:1;
	__u8			csum_valid:1;

#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_VLAN_PRESENT_BIT	7
#else
#define PKT_VLAN_PRESENT_BIT	0
#endif
#define PKT_VLAN_PRESENT_OFFSET()	offsetof(struct sk_buff, __pkt_vlan_present_offset)
	/* private: */
	__u8			__pkt_vlan_present_offset[0];
	/* public: */
	__u8			vlan_present:1;
	__u8			csum_complete_sw:1;
	__u8			csum_level:2;
	__u8			csum_not_inet:1;
	__u8			dst_pending_confirm:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif

	__u8			ipvs_property:1;
	__u8			inner_protocol_type:1;
	__u8			remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
	__u8			offload_fwd_mark:1;
	__u8			offload_l3_fwd_mark:1;
#endif
#ifdef CONFIG_NET_CLS_ACT
	__u8			tc_skip_classify:1;
	__u8			tc_at_ingress:1;
#endif
#ifdef CONFIG_NET_REDIRECT
	__u8			redirected:1;
	__u8			from_ingress:1;
#endif
#ifdef CONFIG_TLS_DEVICE
	__u8			decrypted:1;
#endif

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#endif

	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	int			skb_iif;
	__u32			hash;
	__be16			vlan_proto;
	__u16			vlan_tci;
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
	union {
		unsigned int	napi_id;
		unsigned int	sender_cpu;
	};
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32		secmark;
#endif

	union {
		__u32		mark;
		__u32		reserved_tailroom;
	};

	union {
		__be16		inner_protocol;
		__u8		inner_ipproto;
	};

	__u16			inner_transport_header;
	__u16			inner_network_header;
	__u16			inner_mac_header;

	__be16			protocol;
	__u16			transport_header;
	__u16			network_header;
	__u16			mac_header;

	/* private: */
	__u32			headers_end[0];
	/* public: */

	/* These elements must be at the end, see alloc_skb() for details.  */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	refcount_t		users;

#ifdef CONFIG_SKB_EXTENSIONS
	/* only usable after checking ->active_extensions != 0 */
	struct skb_ext		*extensions;
#endif
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */

#define SKB_ALLOC_FCLONE	0x01
#define SKB_ALLOC_RX		0x02
#define SKB_ALLOC_NAPI		0x04

/**
 * skb_pfmemalloc - Test if the skb was allocated from PFMEMALLOC reserves
 * @skb: buffer
 */
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
	return unlikely(skb->pfmemalloc);
}

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

/**
 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * If dst entry is cached, we do not take reference and dst_release
 * will be avoided by refdst_drop. If dst entry is not cached, we take
 * reference, so that last dst_release can destroy the dst immediately.
 */
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}
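
/* Example (illustrative sketch): skb_dst_set_noref() is only legal inside an
 * RCU read-side section, because the skb does not take its own reference on
 * the dst; the lookup helper below is hypothetical:
 *
 *	rcu_read_lock();
 *	dst = example_cached_dst_lookup(net, daddr);	// hypothetical
 *	if (dst)
 *		skb_dst_set_noref(skb, dst);
 *	...	// the noref dst must not be used past this section
 *	rcu_read_unlock();
 */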

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

/**
 * skb_rtable - Returns the skb &rtable
 * @skb: buffer
 */
static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

/* For mangling skb->pkt_type from user space side from applications
 * such as nft, tc, etc, we only allow a conservative subset of
 * possible pkt_types to be set.
 */
static inline bool skb_pkt_type_ok(u32 ptype)
{
	return ptype <= PACKET_OTHERHOST;
}

/**
 * skb_napi_id - Returns the skb's NAPI id
 * @skb: buffer
 */
static inline unsigned int skb_napi_id(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	return skb->napi_id;
#else
	return 0;
#endif
}

/**
 * skb_unref - decrement the skb's reference count
 * @skb: buffer
 *
 * Returns true if we can free the skb.
 */
static inline bool skb_unref(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return false;
	if (likely(refcount_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!refcount_dec_and_test(&skb->users)))
		return false;

	return true;
}

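/* Example (illustrative sketch): the canonical free pattern built on top of
 * skb_unref(); kfree_skb() and consume_skb() in net/core/skbuff.c follow
 * this shape:
 *
 *	void example_free_skb(struct sk_buff *skb)
 *	{
 *		if (!skb_unref(skb))
 *			return;		// other references remain
 *		__kfree_skb(skb);	// last reference: really free it
 *	}
 */
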
void skb_release_head_state(struct sk_buff *skb);
void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
void skb_tx_error(struct sk_buff *skb);
void consume_skb(struct sk_buff *skb);
void __consume_stateless_skb(struct sk_buff *skb);
void  __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize);

struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
			    int node);
struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size);

/**
 * alloc_skb - allocate a network buffer
 * @size: size to allocate
 * @priority: allocation mask
 *
 * This function is a convenient wrapper around __alloc_skb().
 */
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask);
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first);

/* Layout of fast clones : [skb1][skb2][fclone_ref] */
struct sk_buff_fclones {
	struct sk_buff	skb1;

	struct sk_buff	skb2;

	refcount_t	fclone_ref;
};

/**
 *	skb_fclone_busy - check if fclone is busy
 *	@sk: socket
 *	@skb: buffer
 *
 * Returns true if skb is a fast clone, and its clone is not freed.
 * Some drivers call skb_orphan() in their ndo_start_xmit(),
 * so we also check that this didn't happen.
 */
static inline bool skb_fclone_busy(const struct sock *sk,
				   const struct sk_buff *skb)
{
	const struct sk_buff_fclones *fclones;

	fclones = container_of(skb, struct sk_buff_fclones, skb1);

	return skb->fclone == SKB_FCLONE_ORIG &&
	       refcount_read(&fclones->fclone_ref) > 1 &&
	       fclones->skb2.sk == sk;
}

/**
 * alloc_skb_fclone - allocate a network buffer from fclone cache
 * @size: size to allocate
 * @priority: allocation mask
 *
 * This function is a convenient wrapper around __alloc_skb().
 */
static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}

struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
void skb_headers_offset_update(struct sk_buff *skb, int off);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone);
static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
					  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
}

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
				     unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
				int newtailroom, gfp_t priority);
int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
				     int offset, int len);
int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
			      int offset, int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */
static inline int skb_pad(struct sk_buff *skb, int pad)
{
	return __skb_pad(skb, pad, true);
}
#define dev_kfree_skb(a)	consume_skb(a)

int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
			 int offset, size_t size);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st);
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);

unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config);

/*
 * Packet hash types specify the type of hash in skb_set_hash.
 *
 * Hash types refer to the protocol layer addresses which are used to
 * construct a packet's hash. The hashes are used to differentiate or identify
 * flows of the protocol layer for the hash type. Hash types are either
 * layer-2 (L2), layer-3 (L3), or layer-4 (L4).
 *
 * Properties of hashes:
 *
 * 1) Two packets in different flows have different hash values
 * 2) Two packets in the same flow should have the same hash value
 *
 * A hash at a higher layer is considered to be more specific. A driver should
 * set the most specific hash possible.
 *
 * A driver cannot indicate a more specific hash than the layer at which a hash
 * was computed. For instance an L3 hash cannot be set as an L4 hash.
 *
 * A driver may indicate a hash level which is less specific than the
 * actual layer the hash was computed on. For instance, a hash computed
 * at L4 may be considered an L3 hash. This should only be done if the
 * driver can't unambiguously determine that the HW computed the hash at
 * the higher layer. Note that the "should" in the second property above
 * permits this.
 */
enum pkt_hash_types {
	PKT_HASH_TYPE_NONE,	/* Undefined type */
	PKT_HASH_TYPE_L2,	/* Input: src_MAC, dest_MAC */
	PKT_HASH_TYPE_L3,	/* Input: src_IP, dst_IP */
	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
};

static inline void skb_clear_hash(struct sk_buff *skb)
{
	skb->hash = 0;
	skb->sw_hash = 0;
	skb->l4_hash = 0;
}

static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
	if (!skb->l4_hash)
		skb_clear_hash(skb);
}

static inline void
__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
{
	skb->l4_hash = is_l4;
	skb->sw_hash = is_sw;
	skb->hash = hash;
}

static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{
	/* Used by drivers to set hash from HW */
	__skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
}

static inline void
__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
{
	__skb_set_hash(skb, hash, true, is_l4);
}

void __skb_get_hash(struct sk_buff *skb);
u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys_basic *keys, int hlen);
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen_proto);

static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
					int thoff, u8 ip_proto)
{
	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count);

struct bpf_flow_dissector;
bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
		      __be16 proto, int nhoff, int hlen, unsigned int flags);

bool __skb_flow_dissect(const struct net *net,
			const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen,
			unsigned int flags);

static inline bool skb_flow_dissect(const struct sk_buff *skb,
				    struct flow_dissector *flow_dissector,
				    void *target_container, unsigned int flags)
{
	return __skb_flow_dissect(NULL, skb, flow_dissector,
				  target_container, NULL, 0, 0, 0, flags);
}

static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
					      struct flow_keys *flow,
					      unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(NULL, skb, &flow_keys_dissector,
				  flow, NULL, 0, 0, 0, flags);
}

static inline bool
skb_flow_dissect_flow_keys_basic(const struct net *net,
				 const struct sk_buff *skb,
				 struct flow_keys_basic *flow, void *data,
				 __be16 proto, int nhoff, int hlen,
				 unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
				  data, proto, nhoff, hlen, flags);
}

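/* Example (illustrative sketch): dissecting an skb into full flow keys,
 * e.g. to look at its 5-tuple:
 *
 *	struct flow_keys keys;
 *
 *	if (skb_flow_dissect_flow_keys(skb, &keys, 0)) {
 *		// keys.basic.ip_proto, keys.ports.src, keys.ports.dst, ...
 *	}
 */
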
void skb_flow_dissect_meta(const struct sk_buff *skb,
			   struct flow_dissector *flow_dissector,
			   void *target_container);

/* Gets a skb connection tracking info, ctinfo map should be
 * a map of mapsize to translate enum ip_conntrack_info states
 * to user states.
 */
void
skb_flow_dissect_ct(const struct sk_buff *skb,
		    struct flow_dissector *flow_dissector,
		    void *target_container,
		    u16 *ctinfo_map,
		    size_t mapsize);
void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
			     struct flow_dissector *flow_dissector,
			     void *target_container);

static inline __u32 skb_get_hash(struct sk_buff *skb)
{
	if (!skb->l4_hash && !skb->sw_hash)
		__skb_get_hash(skb);

	return skb->hash;
}

static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
{
	if (!skb->l4_hash && !skb->sw_hash) {
		struct flow_keys keys;
		__u32 hash = __get_hash_from_flowi6(fl6, &keys);

		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
	}

	return skb->hash;
}

__u32 skb_get_hash_perturb(const struct sk_buff *skb,
			   const siphash_key_t *perturb);

static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
	return skb->hash;
}

static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
{
	to->hash = from->hash;
	to->sw_hash = from->sw_hash;
	to->l4_hash = from->l4_hash;
}

static inline void skb_copy_decrypted(struct sk_buff *to,
				      const struct sk_buff *from)
{
#ifdef CONFIG_TLS_DEVICE
	to->decrypted = from->decrypted;
#endif
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
{
	bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY;

	return is_zcopy ? skb_uarg(skb) : NULL;
}

static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
				 bool *have_ref)
{
	if (skb && uarg && !skb_zcopy(skb)) {
		if (unlikely(have_ref && *have_ref))
			*have_ref = false;
		else
			sock_zerocopy_get(uarg);
		skb_shinfo(skb)->destructor_arg = uarg;
		skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
	}
}

static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
{
	skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
	skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
}

static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
{
	return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
}

static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
{
	return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
}

/* Release a reference on a zerocopy structure */
static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
{
	struct ubuf_info *uarg = skb_zcopy(skb);

	if (uarg) {
		if (skb_zcopy_is_nouarg(skb)) {
			/* no notification callback */
		} else if (uarg->callback == sock_zerocopy_callback) {
			uarg->zerocopy = uarg->zerocopy && zerocopy;
			sock_zerocopy_put(uarg);
		} else {
			uarg->callback(uarg, zerocopy);
		}

		skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
	}
}

/* Abort a zerocopy operation and revert zckey on error in send syscall */
static inline void skb_zcopy_abort(struct sk_buff *skb)
{
	struct ubuf_info *uarg = skb_zcopy(skb);

	if (uarg) {
		sock_zerocopy_put_abort(uarg, false);
		skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
	}
}

static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
	skb->next = NULL;
}

/* Iterate through singly-linked GSO fragments of an skb. */
#define skb_list_walk_safe(first, skb, next_skb)                               \
	for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb);  \
	     (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)

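/* Example (illustrative sketch): walking and handing off the segment list
 * produced by skb_segment(); example_xmit_one() is a hypothetical helper:
 *
 *	struct sk_buff *seg, *next;
 *
 *	skb_list_walk_safe(segs, seg, next) {
 *		skb_mark_not_on_list(seg);
 *		example_xmit_one(dev, seg);
 *	}
 */
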
static inline void skb_list_del_init(struct sk_buff *skb)
{
	__list_del_entry(&skb->list);
	skb_mark_not_on_list(skb);
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_empty_lockless - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 *	This variant can be used in lockless contexts.
 */
static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
{
	return READ_ONCE(list->next) == (const struct sk_buff *) list;
}


/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (const struct sk_buff *) list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	refcount_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_header_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 *	__skb_header_release - release reference to header
 *	@skb: buffer to operate on
 */
static inline void __skb_header_release(struct sk_buff *skb)
{
	skb->nohdr = 1;
	atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
}


/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return refcount_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt context or with spinlocks held, @pri must
 *	be GFP_ATOMIC.
 *
 *	NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
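
/* Example (illustrative sketch): a receive handler that wants to keep the
 * buffer must own it outright; the caller here is assumed to run in
 * softirq context, hence GFP_ATOMIC.
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;
 */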

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt context, @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);

		/* Free our shared copy */
		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->next;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}
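
/* Example (illustrative sketch): peeking under the queue lock so the
 * buffer cannot be unlinked while it is examined; "skb", "len" and
 * "flags" are hypothetical locals.
 *
 *	spin_lock_irqsave(&list->lock, flags);
 *	skb = skb_peek(list);
 *	if (skb)
 *		len = skb->len;
 *	spin_unlock_irqrestore(&list->lock, flags);
 */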

/**
 *	__skb_peek - peek at the head of a non-empty &sk_buff_head
 *	@list_: list to peek at
 *
 *	Like skb_peek(), but the caller knows that the list is not empty.
 */
static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
{
	return list_->next;
}

/**
 *	skb_peek_next - peek skb following the given one from a queue
 *	@skb: skb to start from
 *	@list_: list to peek at
 *
 *	Returns %NULL when the end of the list is met or a pointer to the
 *	next element. The reference count is not incremented and the
 *	reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
		const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;

	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = READ_ONCE(list_->prev);

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	skb_queue_len_lockless	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 *	This variant can be used in lockless contexts.
 */
static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
{
	return READ_ONCE(list_->qlen);
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows initializing the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}
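
/* Example (illustrative sketch): an on-stack queue used to collect
 * buffers; since no one else can see it, the non-atomic helpers are
 * sufficient and the spinlock is never initialized.
 *
 *	struct sk_buff_head tmp;
 *
 *	__skb_queue_head_init(&tmp);
 *	...
 *	while ((skb = __skb_dequeue(&tmp)) != NULL)
 *		kfree_skb(skb);
 */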

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers will need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	/* See skb_queue_empty_lockless() and skb_peek_tail()
	 * for the opposite READ_ONCE()
	 */
	WRITE_ONCE(newsk->next, next);
	WRITE_ONCE(newsk->prev, prev);
	WRITE_ONCE(next->prev, newsk);
	WRITE_ONCE(prev->next, newsk);
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	WRITE_ONCE(first->prev, prev);
	WRITE_ONCE(prev->next, first);

	WRITE_ONCE(last->next, next);
	WRITE_ONCE(next->prev, last);
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}
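
/* Example (illustrative sketch): moving all queued buffers to a private
 * list in one step, so the queue lock is held only briefly; "queue" is
 * a hypothetical locked sk_buff_head.
 *
 *	struct sk_buff_head work;
 *
 *	__skb_queue_head_init(&work);
 *	spin_lock_irqsave(&queue->lock, flags);
 *	skb_queue_splice_tail_init(queue, &work);
 *	spin_unlock_irqrestore(&queue->lock, flags);
 */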

/**
 *	__skb_queue_after - queue a buffer after another buffer on a list
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_tail(struct sk_buff_head *list,
				   struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known..
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	WRITE_ONCE(list->qlen, list->qlen - 1);
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	WRITE_ONCE(next->prev, prev);
	WRITE_ONCE(prev->next, next);
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
struct sk_buff *skb_dequeue(struct sk_buff_head *list);

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);


static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
{
	unsigned int i, len = 0;

	for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len;
}

static inline unsigned int skb_pagelen(const struct sk_buff *skb)
{
	return skb_headlen(skb) + __skb_pagelen(skb);
}

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to @size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	/*
	 * Propagate page pfmemalloc to the skb if we can. The problem is
	 * that not all callers have unique ownership of the page but rely
	 * on page_is_pfmemalloc doing the right thing(tm).
	 */
	frag->bv_page		  = page;
	frag->bv_offset		  = off;
	skb_frag_size_set(frag, size);

	page = compound_head(page);
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc	= true;
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}
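
/* Example (illustrative sketch): attaching one freshly allocated page as
 * the first fragment of an skb; the caller then owns the
 * len/data_len/truesize accounting (skb_add_rx_frag() below does both
 * steps at once).
 *
 *	page = dev_alloc_page();
 *	if (!page)
 *		return -ENOMEM;
 *	skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
 *	skb->len      += PAGE_SIZE;
 *	skb->data_len += PAGE_SIZE;
 *	skb->truesize += PAGE_SIZE;
 */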

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize);

#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
void *skb_put(struct sk_buff *skb, unsigned int len);
static inline void *__skb_put(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = __skb_put(skb, len);

	memset(tmp, 0, len);
	return tmp;
}

static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
				   unsigned int len)
{
	void *tmp = __skb_put(skb, len);

	memcpy(tmp, data, len);
	return tmp;
}

static inline void __skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)__skb_put(skb, 1) = val;
}

static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memset(tmp, 0, len);

	return tmp;
}

static inline void *skb_put_data(struct sk_buff *skb, const void *data,
				 unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memcpy(tmp, data, len);

	return tmp;
}

static inline void skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)skb_put(skb, 1) = val;
}

void *skb_push(struct sk_buff *skb, unsigned int len);
static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

void *skb_pull(struct sk_buff *skb, unsigned int len);
static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

void *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return true;
	if (unlikely(len > skb->len))
		return false;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
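
/* Example (illustrative sketch): making sure a header is in the linear
 * area before dereferencing it; "struct my_hdr" is a hypothetical
 * protocol header.
 *
 *	if (!pskb_may_pull(skb, sizeof(struct my_hdr)))
 *		goto drop;
 *	hdr = (struct my_hdr *)skb->data;
 */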

void skb_condense(struct sk_buff *skb);

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_availroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 *	allocated by sk_stream_alloc()
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return 0;

	return skb->end - skb->tail - skb->reserved_tailroom;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}
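
/* Example (illustrative sketch): the usual pattern for building an
 * outgoing packet; "hlen", "paylen" and "payload" are hypothetical.
 *
 *	skb = alloc_skb(hlen + paylen, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);
 *	skb_put_data(skb, payload, paylen);
 */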

/**
 *	skb_tailroom_reserve - adjust reserved_tailroom
 *	@skb: buffer to alter
 *	@mtu: maximum amount of headlen permitted
 *	@needed_tailroom: minimum amount of reserved_tailroom
 *
 *	Set reserved_tailroom so that headlen can be as large as possible but
 *	not larger than mtu and tailroom cannot be smaller than
 *	needed_tailroom.
 *	The required headroom should already have been reserved before using
 *	this function.
 */
static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
					unsigned int needed_tailroom)
{
	SKB_LINEAR_ASSERT(skb);
	if (mtu < skb_tailroom(skb) - needed_tailroom)
		/* use at most mtu */
		skb->reserved_tailroom = skb_tailroom(skb) - mtu;
	else
		/* use up to all available space */
		skb->reserved_tailroom = needed_tailroom;
}

#define ENCAP_TYPE_ETHER	0
#define ENCAP_TYPE_IPPROTO	1

static inline void skb_set_inner_protocol(struct sk_buff *skb,
					  __be16 protocol)
{
	skb->inner_protocol = protocol;
	skb->inner_protocol_type = ENCAP_TYPE_ETHER;
}

static inline void skb_set_inner_ipproto(struct sk_buff *skb,
					 __u8 ipproto)
{
	skb->inner_ipproto = ipproto;
	skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
}

static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->mac_header;
	skb->inner_network_header = skb->network_header;
	skb->inner_transport_header = skb->transport_header;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

static inline unsigned char *skb_inner_transport_header(const struct sk_buff
							*skb)
{
	return skb->head + skb->inner_transport_header;
}

static inline int skb_inner_transport_offset(const struct sk_buff *skb)
{
	return skb_inner_transport_header(skb) - skb->data;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
	skb->inner_transport_header = skb->data - skb->head;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
						   const int offset)
{
	skb_reset_inner_transport_header(skb);
	skb->inner_transport_header += offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
	skb->inner_network_header = skb->data - skb->head;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
						const int offset)
{
	skb_reset_inner_network_header(skb);
	skb->inner_network_header += offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->data - skb->head;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_inner_mac_header(skb);
	skb->inner_mac_header += offset;
}
static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
	return skb->transport_header != (typeof(skb->transport_header))~0U;
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_offset(const struct sk_buff *skb)
{
	return skb_mac_header(skb) - skb->data;
}

static inline u32 skb_mac_header_len(const struct sk_buff *skb)
{
	return skb->network_header - skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != (typeof(skb->mac_header))~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

static inline void skb_pop_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->network_header;
}

static inline void skb_probe_transport_header(struct sk_buff *skb)
{
	struct flow_keys_basic keys;

	if (skb_transport_header_was_set(skb))
		return;

	if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
					     NULL, 0, 0, 0, 0))
		skb_set_transport_header(skb, keys.control.thoff);
}

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
{
	return skb->head + skb->csum_start;
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
{
	return skb->inner_transport_header - skb->inner_network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int skb_inner_network_offset(const struct sk_buff *skb)
{
	return skb_inner_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom, you should not reduce this.
 *
 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
 * to reduce average number of cache lines per packet.
 * get_rps_cpus() for example only access one 64 bytes aligned block :
 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
{
	if (WARN_ON(skb_is_nonlinear(skb)))
		return;
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	__skb_set_length(skb, len);
}

void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
{
	unsigned int diff = len - skb->len;

	if (skb_tailroom(skb) < diff) {
		int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
					   GFP_ATOMIC);
		if (ret)
			return ret;
	}
	__skb_set_length(skb, len);
	return 0;
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor) {
		skb->destructor(skb);
		skb->destructor = NULL;
		skb->sk		= NULL;
	} else {
		BUG_ON(skb->sk);
	}
}

/**
 *	skb_orphan_frags - orphan the frags contained in a buffer
 *	@skb: buffer to orphan frags from
 *	@gfp_mask: allocation mask for replacement pages
 *
 *	For each frag in the SKB which needs a destructor (i.e. has an
 *	owner) create a copy of that frag and release the original
 *	page by calling the destructor.
 */
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!skb_zcopy(skb)))
		return 0;
	if (!skb_zcopy_is_nouarg(skb) &&
	    skb_uarg(skb)->callback == sock_zerocopy_callback)
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/* Frags must be orphaned, even if refcounted, if skb might loop to rx path */
static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!skb_zcopy(skb)))
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
void skb_queue_purge(struct sk_buff_head *list);

unsigned int skb_rbtree_purge(struct rb_root *root);

void *netdev_alloc_frag(unsigned int fragsz);

struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
				   gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}


static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
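
/* Example (illustrative sketch): allocating an rx buffer so that the IP
 * header lands on a 4-byte boundary; "RX_BUF_LEN" is a hypothetical size.
 *
 *	skb = netdev_alloc_skb_ip_align(dev, RX_BUF_LEN);
 *	if (unlikely(!skb))
 *		return -ENOMEM;
 */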

static inline void skb_free_frag(void *addr)
{
	page_frag_free(addr);
}

void *napi_alloc_frag(unsigned int fragsz);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
				 unsigned int length, gfp_t gfp_mask);
static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
					     unsigned int length)
{
	return __napi_alloc_skb(napi, length, GFP_ATOMIC);
}
void napi_consume_skb(struct sk_buff *skb, int budget);

void __kfree_skb_flush(void);
void __kfree_skb_defer(struct sk_buff *skb);

/**
 * __dev_alloc_pages - allocate page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 * @order: size of the allocation
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
					     unsigned int order)
{
	/* This piece of code contains several assumptions.
	 * 1.  This is for device Rx, therefore a cold page is preferred.
	 * 2.  The expectation is the user wants a compound page.
	 * 3.  If requesting an order 0 page it will not be compound
	 *     due to the check to see if order has a value in prep_new_page
	 * 4.  __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
	 *     code in gfp_to_alloc_flags that should be enforcing this.
	 */
	gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;

	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
}

static inline struct page *dev_alloc_pages(unsigned int order)
{
	return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
}

/**
 * __dev_alloc_page - allocate a page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
{
	return __dev_alloc_pages(gfp_mask, 0);
}

static inline struct page *dev_alloc_page(void)
{
	return dev_alloc_pages(0);
}

/**
 *	skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
 *	@page: The page that was allocated from skb_alloc_page
 *	@skb: The skb that may need pfmemalloc set
 */
static inline void skb_propagate_pfmemalloc(struct page *page,
					     struct sk_buff *skb)
{
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc = true;
}

/**
 * skb_frag_off() - Returns the offset of a skb fragment
 * @frag: the paged fragment
 */
static inline unsigned int skb_frag_off(const skb_frag_t *frag)
{
	return frag->bv_offset;
}

/**
 * skb_frag_off_add() - Increments the offset of a skb fragment by @delta
 * @frag: skb fragment
 * @delta: value to add
 */
static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
{
	frag->bv_offset += delta;
}

/**
 * skb_frag_off_set() - Sets the offset of a skb fragment
 * @frag: skb fragment
 * @offset: offset of fragment
 */
static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
{
	frag->bv_offset = offset;
}

/**
 * skb_frag_off_copy() - Sets the offset of a skb fragment from another fragment
 * @fragto: skb fragment where offset is set
 * @fragfrom: skb fragment offset is copied from
 */
static inline void skb_frag_off_copy(skb_frag_t *fragto,
				     const skb_frag_t *fragfrom)
{
	fragto->bv_offset = fragfrom->bv_offset;
}

/**
 * skb_frag_page - retrieve the page referred to by a paged fragment
 * @frag: the paged fragment
 *
 * Returns the &struct page associated with @frag.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->bv_page;
}

/**
 * __skb_frag_ref - take an additional reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Takes an additional reference on the paged fragment @frag.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

/**
 * skb_frag_ref - take an additional reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset.
 *
 * Takes an additional reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

/**
 * __skb_frag_unref - release a reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Releases a reference on the paged fragment @frag.
 */
static inline void __skb_frag_unref(skb_frag_t *frag)
{
	put_page(skb_frag_page(frag));
}

/**
 * skb_frag_unref - release a reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset
 *
 * Releases a reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}

/**
 * skb_frag_address - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. The page must already
 * be mapped.
 */
static inline void *skb_frag_address(const skb_frag_t *frag)
{
	return page_address(skb_frag_page(frag)) + skb_frag_off(frag);
}

/**
 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. Checks that the page
 * is mapped and returns %NULL otherwise.
 */
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));
	if (unlikely(!ptr))
		return NULL;

	return ptr + skb_frag_off(frag);
}

/**
 * skb_frag_page_copy() - sets the page in a fragment from another fragment
 * @fragto: skb fragment where page is set
 * @fragfrom: skb fragment page is copied from
 */
static inline void skb_frag_page_copy(skb_frag_t *fragto,
				      const skb_frag_t *fragfrom)
{
	fragto->bv_page = fragfrom->bv_page;
}

/**
 * __skb_frag_set_page - sets the page contained in a paged fragment
 * @frag: the paged fragment
 * @page: the page to set
 *
 * Sets the fragment @frag to contain @page.
 */
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->bv_page = page;
}

/**
 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
 * @skb: the buffer
 * @f: the fragment offset
 * @page: the page to set
 *
 * Sets the @f'th fragment of @skb to contain @page.
 */
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);

/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 * @dir: the direction of the mapping (``PCI_DMA_*``)
 *
 * Maps the page associated with @frag to @device.
 */
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    skb_frag_off(frag) + offset, size, dir);
}
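
/* Example (illustrative sketch): mapping every fragment of an skb for
 * transmit; error unwinding of earlier mappings and the "unwind" label
 * are left to the hypothetical caller.
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 *		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *		dma_addr_t dma;
 *
 *		dma = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
 *				       DMA_TO_DEVICE);
 *		if (dma_mapping_error(dev, dma))
 *			goto unwind;
 *	}
 */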

static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
					gfp_t gfp_mask)
{
	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}


static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
						  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
}


/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int skb_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
	       pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
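
/* Example (illustrative sketch): a transmit path that pushes its own
 * header first makes sure the headroom exists and is writable;
 * "MY_HLEN" is a hypothetical header length.
 *
 *	if (skb_cow_head(skb, MY_HLEN))
 *		goto drop;
 *	hdr = skb_push(skb, MY_HLEN);
 */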

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

/**
 *	__skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *	@free_on_error: free buffer on error
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error if @free_on_error is true.
 */
static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
				  bool free_on_error)
{
	unsigned int size = skb->len;

	if (unlikely(size < len)) {
		len -= size;
		if (__skb_pad(skb, len, free_on_error))
			return -ENOMEM;
		__skb_put(skb, len);
	}
	return 0;
}

/**
 *	skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
{
	return __skb_put_padto(skb, len, true);
}
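
/* Example (illustrative sketch): padding a runt frame to the Ethernet
 * minimum before handing it to hardware. On failure the skb has already
 * been freed, so it must not be touched again.
 *
 *	if (skb_put_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;
 */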

L
Linus Torvalds 已提交
3240
static inline int skb_add_data(struct sk_buff *skb,
3241
			       struct iov_iter *from, int copy)
L
Linus Torvalds 已提交
3242 3243 3244 3245
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		__wsum csum = 0;
		if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
					         &csum, from)) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
				    const struct page *page, int off)
{
	if (skb_zcopy(skb))
		return false;
	if (i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == skb_frag_off(frag) + skb_frag_size(frag);
	}
	return false;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}
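
/*
 * Usage sketch (illustrative): code that wants to scan the whole packet
 * through skb->data linearizes first.  scan() is hypothetical.
 *
 *	if (skb_linearize(skb))
 *		goto drop;
 *	scan(skb->data, skb->len);
 */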

/**
 * skb_has_shared_frag - can any frag be overwritten
 * @skb: buffer to test
 *
 * Return true if the skb has at least one frag that might be modified
 * by an external entity (as in vmsplice()/sendfile())
 */
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) &&
	       skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

static __always_inline void
__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
		     unsigned int off)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_block_sub(skb->csum,
					   csum_partial(start, len, 0), off);
	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) < 0)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
3335 3336
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	__skb_postpull_rcsum(skb, start, len, 0);
}
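
/*
 * Usage sketch (illustrative): after manually pulling an encapsulation
 * header on the receive path, the complete checksum must be adjusted.
 * hdr_len is hypothetical.
 *
 *	start = skb->data;
 *	__skb_pull(skb, hdr_len);
 *	skb_postpull_rcsum(skb, start, hdr_len);
 */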

static __always_inline void
__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
		     unsigned int off)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_block_add(skb->csum,
					   csum_partial(start, len, 0), off);
}

/**
 *	skb_postpush_rcsum - update checksum for received skb after push
 *	@skb: buffer to update
 *	@start: start of data after push
 *	@len: length of data pushed
 *
 *	After doing a push on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum.
 */
static inline void skb_postpush_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	__skb_postpush_rcsum(skb, start, len, 0);
}

void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	skb_push_rcsum - push skb and update receive checksum
 *	@skb: buffer to update
 *	@len: length of data pushed
 *
 *	This function performs an skb_push on the packet and updates
 *	the CHECKSUM_COMPLETE checksum.  It should be used on
 *	receive path processing instead of skb_push unless you know
 *	that the checksum difference is zero (e.g., a valid IP header)
 *	or you are setting ip_summed to CHECKSUM_NONE.
 */
static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
{
	skb_push(skb, len);
	skb_postpush_rcsum(skb, skb->data, len);
	return skb->data;
}

int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
3396
 *	It can change skb pointers.
 */

static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	return pskb_trim_rcsum_slow(skb, len);
}

static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	__skb_trim(skb, len);
	return 0;
}

static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __skb_grow(skb, len);
}

#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
#define skb_rb_first(root) rb_to_skb(rb_first(root))
#define skb_rb_last(root)  rb_to_skb(rb_last(root))
#define skb_rb_next(skb)   rb_to_skb(rb_next(&(skb)->rbnode))
#define skb_rb_prev(skb)   rb_to_skb(rb_prev(&(skb)->rbnode))

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     skb != (struct sk_buff *)(queue);				\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)					\
		for (skb = (queue)->next, tmp = skb->next;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)
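
/*
 * Usage sketch (illustrative): the _safe variant must be used when the
 * current skb may be unlinked inside the loop body (with the queue
 * appropriately locked).  match() is hypothetical.
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(queue, skb, tmp) {
 *		if (match(skb)) {
 *			__skb_unlink(skb, queue);
 *			kfree_skb(skb);
 *		}
 *	}
 */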

#define skb_queue_walk_from(queue, skb)						\
		for (; skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_rbtree_walk(skb, root)						\
		for (skb = skb_rb_first(root); skb != NULL;			\
		     skb = skb_rb_next(skb))

#define skb_rbtree_walk_from(skb)						\
		for (; skb != NULL;						\
		     skb = skb_rb_next(skb))

#define skb_rbtree_walk_from_safe(skb, tmp)					\
		for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL);	\
		     skb = tmp)
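
/*
 * Usage sketch (illustrative): walking an skb rbtree, such as a TCP
 * retransmit or out-of-order queue, in order.  root is a struct rb_root
 * and process() is hypothetical.
 *
 *	struct sk_buff *skb;
 *
 *	skb_rbtree_walk(skb, &root)
 *		process(skb);
 */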

#define skb_queue_walk_from_safe(queue, skb, tmp)				\
		for (tmp = skb->next;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     skb != (struct sk_buff *)(queue);				\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->prev, tmp = skb->prev;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->prev;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->prev)

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
				int *err, long *timeo_p,
				const struct sk_buff *skb);
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
					  struct sk_buff_head *queue,
					  unsigned int flags,
					  int *off, int *err,
					  struct sk_buff **last);
struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
					struct sk_buff_head *queue,
					unsigned int flags, int *off, int *err,
					struct sk_buff **last);
struct sk_buff *__skb_recv_datagram(struct sock *sk,
				    struct sk_buff_head *sk_queue,
				    unsigned int flags, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
				  int *err);
3504 3505
__poll_t datagram_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
			   struct iov_iter *to, int size);
static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
					struct msghdr *msg, int size)
{
	return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
}
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
				   struct msghdr *msg);
int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
			   struct iov_iter *to, int len,
			   struct ahash_request *hash);
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				 struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
static inline void skb_free_datagram_locked(struct sock *sk,
					    struct sk_buff *skb)
{
	__skb_free_datagram_locked(sk, skb, 0);
}
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
			      int len, __wsum csum);
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int len,
		    unsigned int flags);
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
			 int len);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
		 int len, int hlen);
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
				 unsigned int offset);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
		  int mac_len, bool ethernet);
int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
		 bool ethernet);
int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
int skb_mpls_dec_ttl(struct sk_buff *skb);
struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
			     gfp_t gfp);

static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
	return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
}

static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
{
	return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}

struct skb_checksum_ops {
	__wsum (*update)(const void *mem, int len, __wsum wsum);
	__wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
};

extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;

__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
		      __wsum csum, const struct skb_checksum_ops *ops);
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
		    __wsum csum);

static inline void * __must_check
__skb_header_pointer(const struct sk_buff *skb, int offset,
		     int len, void *data, int hlen, void *buffer)
{
	if (hlen - offset >= len)
		return data + offset;

	if (!skb ||
	    skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}

static inline void * __must_check
skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
{
	return __skb_header_pointer(skb, offset, len, skb->data,
				    skb_headlen(skb), buffer);
}
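
/*
 * Usage sketch (illustrative): the classic pattern copies the header to
 * a stack buffer only when it is not already linear.  thoff (transport
 * header offset) is hypothetical.
 *
 *	struct tcphdr _tcph;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, thoff, sizeof(_tcph), &_tcph);
 *	if (!th)
 *		goto drop;
 */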

/**
 *	skb_needs_linearize - check if we need to linearize a given skb
 *			      depending on the given device features.
 *	@skb: socket buffer to check
 *	@features: net device features
 *
 *	Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
static inline bool skb_needs_linearize(struct sk_buff *skb,
				       netdev_features_t features)
{
	return skb_is_nonlinear(skb) &&
	       ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
}
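
/*
 * Usage sketch (illustrative): a transmit path falls back to
 * linearizing when the device cannot handle the skb's geometry.
 *
 *	if (skb_needs_linearize(skb, dev->features) &&
 *	    __skb_linearize(skb))
 *		goto drop;
 */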

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct __kernel_old_timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct __kernel_old_timeval
 *	and stores it in @stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct __kernel_old_timeval *stamp)
{
	*stamp = ns_to_kernel_old_timeval(skb->tstamp);
}

static inline void skb_get_new_timestamp(const struct sk_buff *skb,
					 struct __kernel_sock_timeval *stamp)
{
	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);

	stamp->tv_sec = ts.tv_sec;
	stamp->tv_usec = ts.tv_nsec / 1000;
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct __kernel_old_timespec *stamp)
{
	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);

	stamp->tv_sec = ts.tv_sec;
	stamp->tv_nsec = ts.tv_nsec;
}

static inline void skb_get_new_timestampns(const struct sk_buff *skb,
					   struct __kernel_timespec *stamp)
{
	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);

	stamp->tv_sec = ts.tv_sec;
	stamp->tv_nsec = ts.tv_nsec;
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return 0;
}

static inline u8 skb_metadata_len(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->meta_len;
}

static inline void *skb_metadata_end(const struct sk_buff *skb)
{
	return skb_mac_header(skb);
}

static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
					  const struct sk_buff *skb_b,
					  u8 meta_len)
{
	const void *a = skb_metadata_end(skb_a);
	const void *b = skb_metadata_end(skb_b);
	/* Use a more efficient variant than a plain call to memcmp(). */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	u64 diffs = 0;

	switch (meta_len) {
#define __it(x, op) (x -= sizeof(u##op))
#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
	case 32: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case 24: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case 16: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case  8: diffs |= __it_diff(a, b, 64);
		break;
	case 28: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case 20: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case 12: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case  4: diffs |= __it_diff(a, b, 32);
		break;
	}
	return diffs;
#else
	return memcmp(a - meta_len, b - meta_len, meta_len);
#endif
}

static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
					const struct sk_buff *skb_b)
{
	u8 len_a = skb_metadata_len(skb_a);
	u8 len_b = skb_metadata_len(skb_b);

	if (!(len_a | len_b))
		return false;

	return len_a != len_b ?
	       true : __skb_metadata_differs(skb_a, skb_b, len_a);
}

static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len)
{
	skb_shinfo(skb)->meta_len = meta_len;
}

static inline void skb_metadata_clear(struct sk_buff *skb)
{
	skb_metadata_set(skb, 0);
}

struct sk_buff *skb_clone_sk(struct sk_buff *skb);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

void skb_clone_tx_timestamp(struct sk_buff *skb);
bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack with a
 * timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 *
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

void __skb_tstamp_tx(struct sk_buff *orig_skb,
		     struct skb_shared_hwtstamps *hwtstamps,
		     struct sock *sk, int tstype);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non-NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * Specifically, one should make absolutely sure that this function is
 * called before TX completion of this packet can trigger.  Otherwise
 * the packet could potentially already be freed.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
		skb_tstamp_tx(skb, NULL);
}
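
/*
 * Usage sketch (illustrative): called from a driver's ndo_start_xmit()
 * right before the descriptor is posted.  ring_post() is hypothetical.
 *
 *	skb_tx_timestamp(skb);
 *	ring_post(priv, skb);
 *	return NETDEV_TX_OK;
 */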

/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 *
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
__sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
		skb->csum_valid ||
		(skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) >= 0));
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify the checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is correct.  In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}

static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level == 0)
			skb->ip_summed = CHECKSUM_NONE;
		else
			skb->csum_level--;
	}
}

static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
			skb->csum_level++;
	} else if (skb->ip_summed == CHECKSUM_NONE) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = 0;
	}
}

/* Check if we need to perform checksum complete validation.
 *
 * Returns true if checksum complete is needed, false otherwise
 * (either checksum is unnecessary or zero checksum is allowed).
 */
static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
						  bool zero_okay,
						  __sum16 check)
{
	if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
		skb->csum_valid = 1;
		__skb_decr_checksum_unnecessary(skb);
		return false;
	}

	return true;
}

/* For small packets <= CHECKSUM_BREAK perform checksum complete directly
 * in checksum_init.
 */
#define CHECKSUM_BREAK 76

/* Unset checksum-complete
 *
 * Unsetting checksum complete can be done when a packet is being
 * modified (uncompressed, for instance) and the checksum-complete value
 * is invalidated.
 */
static inline void skb_checksum_complete_unset(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/* Validate (init) checksum based on checksum complete.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete. In the latter
 *	case the ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo
 *	checksum is stored in skb->csum for use in __skb_checksum_complete
 *   non-zero: value of invalid checksum
 *
 */
static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
						       bool complete,
						       __wsum psum)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_fold(csum_add(psum, skb->csum))) {
			skb->csum_valid = 1;
			return 0;
		}
	}

	skb->csum = psum;

	if (complete || skb->len <= CHECKSUM_BREAK) {
		__sum16 csum;

		csum = __skb_checksum_complete(skb);
		skb->csum_valid = !csum;
		return csum;
	}

	return 0;
}

static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
{
	return 0;
}

/* Perform checksum validate (init). Note that this is a macro since we only
 * want to call the function that computes the pseudo header if it is
 * actually needed. First we try to validate without any computation
 * (checksum unnecessary) and then calculate based on checksum complete,
 * calling the function to compute the pseudo header.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
#define __skb_checksum_validate(skb, proto, complete,			\
				zero_okay, check, compute_pseudo)	\
({									\
	__sum16 __ret = 0;						\
	skb->csum_valid = 0;						\
	if (__skb_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_checksum_validate_complete(skb,		\
				complete, compute_pseudo(skb, proto));	\
	__ret;								\
})

#define skb_checksum_init(skb, proto, compute_pseudo)			\
	__skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)

#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo)	\
	__skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)

#define skb_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)

#define skb_checksum_validate_zero_check(skb, proto, check,		\
					 compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)

#define skb_checksum_simple_validate(skb)				\
	__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
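
/*
 * Usage sketch (illustrative): a receive path with a pseudo header
 * passes its pseudo-header helper, e.g. for TCP over IPv4 (assuming
 * inet_compute_pseudo from net/ip.h):
 *
 *	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
 *		goto csum_error;
 */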

static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
{
	return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
}

static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo)
{
	skb->csum = ~pseudo;
	skb->ip_summed = CHECKSUM_COMPLETE;
}

#define skb_checksum_try_convert(skb, proto, compute_pseudo)	\
do {									\
	if (__skb_checksum_convert_check(skb))				\
		__skb_checksum_convert(skb, compute_pseudo(skb, proto)); \
} while (0)

static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
					      u16 start, u16 offset)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
	skb->csum_offset = offset - start;
}

/* Update the skb and packet to reflect the remote checksum offload operation.
 * When called, ptr indicates the starting point for skb->csum when
 * ip_summed is CHECKSUM_COMPLETE. If we need to create the complete checksum
 * here, skb_postpull_rcsum() is done first so that skb->csum starts at ptr.
 */
static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
4063
				       int start, int offset, bool nopartial)
4064 4065 4066
{
	__wsum delta;

	if (!nopartial) {
		skb_remcsum_adjust_partial(skb, ptr, start, offset);
		return;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
		__skb_checksum_complete(skb);
		skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
	}

	delta = remcsum_adjust(ptr, skb->csum, start, offset);

	/* Adjust skb->csum since we changed the packet */
	skb->csum = csum_add(skb->csum, delta);
}

static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return (void *)(skb->_nfct & NFCT_PTRMASK);
#else
	return NULL;
#endif
}

static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return skb->_nfct;
#else
	return 0UL;
#endif
}

static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	skb->_nfct = nfct;
#endif
}

#ifdef CONFIG_SKB_EXTENSIONS
enum skb_ext_id {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	SKB_EXT_BRIDGE_NF,
#endif
#ifdef CONFIG_XFRM
	SKB_EXT_SEC_PATH,
#endif
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	TC_SKB_EXT,
#endif
#if IS_ENABLED(CONFIG_MPTCP)
	SKB_EXT_MPTCP,
#endif
	SKB_EXT_NUM, /* must be last */
};

/**
 *	struct skb_ext - sk_buff extensions
 *	@refcnt: 1 on allocation, deallocated on 0
 *	@offset: offset to add to @data to obtain extension address
 *	@chunks: size currently allocated, stored in SKB_EXT_ALIGN_SHIFT units
 *	@data: start of extension data, variable sized
 *
 *	Note: offsets/lengths are stored in chunks of 8 bytes, this allows
 *	to use 'u8' types while allowing up to 2kb worth of extension data.
 */
struct skb_ext {
	refcount_t refcnt;
	u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */
	u8 chunks;		/* same */
	char data[] __aligned(8);
};

struct skb_ext *__skb_ext_alloc(gfp_t flags);
void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
		    struct skb_ext *ext);
void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_put(struct skb_ext *ext);

static inline void skb_ext_put(struct sk_buff *skb)
{
	if (skb->active_extensions)
		__skb_ext_put(skb->extensions);
}

static inline void __skb_ext_copy(struct sk_buff *dst,
				  const struct sk_buff *src)
{
	dst->active_extensions = src->active_extensions;

	if (src->active_extensions) {
		struct skb_ext *ext = src->extensions;

		refcount_inc(&ext->refcnt);
		dst->extensions = ext;
	}
}

static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src)
{
	skb_ext_put(dst);
	__skb_ext_copy(dst, src);
}

static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i)
{
	return !!ext->offset[i];
}

static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
{
	return skb->active_extensions & (1 << id);
}

static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
{
	if (skb_ext_exist(skb, id))
		__skb_ext_del(skb, id);
}

static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
{
	if (skb_ext_exist(skb, id)) {
		struct skb_ext *ext = skb->extensions;

		return (void *)ext + (ext->offset[id] << 3);
	}

	return NULL;
}
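
/*
 * Usage sketch (illustrative): attaching an extension and looking it up
 * again later.
 *
 *	struct sec_path *sp;
 *
 *	sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
 *	if (!sp)
 *		return -ENOMEM;
 *
 *	sp = skb_ext_find(skb, SKB_EXT_SEC_PATH);
 */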

static inline void skb_ext_reset(struct sk_buff *skb)
{
	if (unlikely(skb->active_extensions)) {
		__skb_ext_put(skb->extensions);
		skb->active_extensions = 0;
	}
}

static inline bool skb_has_extensions(struct sk_buff *skb)
{
	return unlikely(skb->active_extensions);
}
#else
static inline void skb_ext_put(struct sk_buff *skb) {}
static inline void skb_ext_reset(struct sk_buff *skb) {}
static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
static inline bool skb_has_extensions(struct sk_buff *skb) { return false; }
#endif /* CONFIG_SKB_EXTENSIONS */

static inline void nf_reset_ct(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb_nfct(skb));
	skb->_nfct = 0;
#endif
}

static inline void nf_reset_trace(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	skb->nf_trace = 0;
#endif
}

static inline void ipvs_reset(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IP_VS)
	skb->ipvs_property = 0;
#endif
}

/* Note: This doesn't put any conntrack info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
			     bool copy)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->_nfct = src->_nfct;
	nf_conntrack_get(skb_nfct(src));
#endif
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	if (copy)
		dst->nf_trace = src->nf_trace;
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb_nfct(dst));
#endif
	__nf_copy(dst, src, true);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline int secpath_exists(const struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
#else
	return 0;
#endif
}

static inline bool skb_irq_freeable(const struct sk_buff *skb)
{
	return !skb->destructor &&
		!secpath_exists(skb) &&
		!skb_nfct(skb) &&
		!skb->_skb_refdst &&
		!skb_has_frag_list(skb);
}

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}

static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
{
	skb->dst_pending_confirm = val;
}

static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
{
	return skb->dst_pending_confirm != 0;
}

static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb_ext_find(skb, SKB_EXT_SEC_PATH);
#else
	return NULL;
#endif
}

/* Keeps track of mac header offset relative to skb->head.
 * It is useful for TSO of tunneling protocols, e.g. GRE.
 * For non-tunnel skb it points to skb_mac_header() and for
 * tunnel skb it points to outer mac header.
 * Keeps track of level of encapsulation of network headers.
 */
struct skb_gso_cb {
	union {
		int	mac_offset;
		int	data_offset;
	};
	int	encap_level;
	__wsum	csum;
	__u16	csum_start;
};
#define SKB_GSO_CB_OFFSET	32
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))

static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
	return (skb_mac_header(inner_skb) - inner_skb->head) -
		SKB_GSO_CB(inner_skb)->mac_offset;
}

static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
{
	int new_headroom, headroom;
	int ret;

	headroom = skb_headroom(skb);
	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
	if (ret)
		return ret;

	new_headroom = skb_headroom(skb);
	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
	return 0;
}

static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
{
	/* Do not update partial checksums if remote checksum is enabled. */
	if (skb->remcsum_offload)
		return;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
}

/* Compute the checksum for a gso segment. First compute the checksum value
 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
 * then add in skb->csum (checksum from csum_start to end of packet).
 * skb->csum and csum_start are then updated to reflect the checksum of the
 * resultant packet starting from the transport header-- the resultant checksum
 * is in the res argument (i.e. normally zero or ~ of checksum of a pseudo
 * header).
 */
static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
	unsigned char *csum_start = skb_transport_header(skb);
	int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
	__wsum partial = SKB_GSO_CB(skb)->csum;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;

	return csum_fold(csum_partial(csum_start, plen, partial));
}

static inline bool skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}
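
/*
 * Usage sketch (illustrative): drivers commonly branch on GSO state
 * when filling transmit descriptors.
 *
 *	if (skb_is_gso(skb))
 *		mss = skb_shinfo(skb)->gso_size;
 */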

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
}

static inline void skb_gso_reset(struct sk_buff *skb)
{
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_segs = 0;
	skb_shinfo(skb)->gso_type = 0;
}

static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
					 u16 increment)
{
	if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
		return;
	shinfo->gso_size += increment;
}

static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
					 u16 decrement)
{
	if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
		return;
	shinfo->gso_size -= decrement;
}

void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16(*skb_chkf)(struct sk_buff *skb));

/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs built around a head frag can be removed if they are
 * not cloned.  This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
	return !skb->head_frag || skb_cloned(skb);
}

/* Local Checksum Offload.
 * Compute outer checksum based on the assumption that the
 * inner checksum will be offloaded later.
 * See Documentation/networking/checksum-offloads.rst for
 * explanation of how this works.
 * Fill in outer checksum adjustment (e.g. with sum of outer
 * pseudo-header) before calling.
 * Also ensure that inner checksum is in linear data area.
 */
static inline __wsum lco_csum(struct sk_buff *skb)
{
	unsigned char *csum_start = skb_checksum_start(skb);
	unsigned char *l4_hdr = skb_transport_header(skb);
	__wsum partial;

	/* Start with complement of inner checksum adjustment */
	partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
						    skb->csum_offset));

	/* Add in checksum of our headers (incl. outer checksum
	 * adjustment filled in by caller) and return result.
	 */
	return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
}
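
/*
 * Usage sketch (illustrative): a UDP tunnel seeds the outer checksum
 * field with the complemented pseudo-header sum and then folds in
 * lco_csum(), as in udp_set_csum().  uh, len, saddr and daddr come from
 * the caller.
 *
 *	uh->check = ~udp_v4_check(len, saddr, daddr, 0);
 *	uh->check = csum_fold(lco_csum(skb));
 *	if (uh->check == 0)
 *		uh->check = CSUM_MANGLED_0;
 */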

static inline bool skb_is_redirected(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_REDIRECT
	return skb->redirected;
#else
	return false;
#endif
}

static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
{
#ifdef CONFIG_NET_REDIRECT
	skb->redirected = 1;
	skb->from_ingress = from_ingress;
	if (skb->from_ingress)
		skb->tstamp = 0;
#endif
}

static inline void skb_reset_redirect(struct sk_buff *skb)
{
#ifdef CONFIG_NET_REDIRECT
	skb->redirected = 0;
#endif
}

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */