skbuff.h 127.2 KB
Newer Older
1
/* SPDX-License-Identifier: GPL-2.0-or-later */
L
Linus Torvalds 已提交
2 3 4 5 6 7 8 9 10 11 12 13 14 15
/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
16
#include <linux/bug.h>
17
#include <linux/bvec.h>
L
Linus Torvalds 已提交
18
#include <linux/cache.h>
E
Eric Dumazet 已提交
19
#include <linux/rbtree.h>
20
#include <linux/socket.h>
21
#include <linux/refcount.h>
L
Linus Torvalds 已提交
22

A
Arun Sharma 已提交
23
#include <linux/atomic.h>
L
Linus Torvalds 已提交
24 25 26
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
27
#include <linux/textsearch.h>
L
Linus Torvalds 已提交
28
#include <net/checksum.h>
29
#include <linux/rcupdate.h>
30
#include <linux/hrtimer.h>
31
#include <linux/dma-mapping.h>
32
#include <linux/netdev_features.h>
33
#include <linux/sched.h>
34
#include <linux/sched/clock.h>
35
#include <net/flow_dissector.h>
36
#include <linux/splice.h>
37
#include <linux/in6.h>
38
#include <linux/if_packet.h>
39
#include <net/flow.h>
40 41 42
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_common.h>
#endif
L
Linus Torvalds 已提交
43

44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88
/* The interface for checksum offload between the stack and networking drivers
 * is as follows...
 *
 * A. IP checksum related features
 *
 * Drivers advertise checksum offload capabilities in the features of a device.
 * From the stack's point of view these are capabilities offered by the driver,
 * a driver typically only advertises features that it is capable of offloading
 * to its device.
 *
 * The checksum related features are:
 *
 *	NETIF_F_HW_CSUM	- The driver (or its device) is able to compute one
 *			  IP (one's complement) checksum for any combination
 *			  of protocols or protocol layering. The checksum is
 *			  computed and set in a packet per the CHECKSUM_PARTIAL
 *			  interface (see below).
 *
 *	NETIF_F_IP_CSUM - Driver (device) is only able to checksum plain
 *			  TCP or UDP packets over IPv4. These are specifically
 *			  unencapsulated packets of the form IPv4|TCP or
 *			  IPv4|UDP where the Protocol field in the IPv4 header
 *			  is TCP or UDP. The IPv4 header may contain IP options
 *			  This feature cannot be set in features for a device
 *			  with NETIF_F_HW_CSUM also set. This feature is being
 *			  DEPRECATED (see below).
 *
 *	NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
 *			  TCP or UDP packets over IPv6. These are specifically
 *			  unencapsulated packets of the form IPv6|TCP or
 *			  IPv4|UDP where the Next Header field in the IPv6
 *			  header is either TCP or UDP. IPv6 extension headers
 *			  are not supported with this feature. This feature
 *			  cannot be set in features for a device with
 *			  NETIF_F_HW_CSUM also set. This feature is being
 *			  DEPRECATED (see below).
 *
 *	NETIF_F_RXCSUM - Driver (device) performs receive checksum offload.
 *			 This flag is used only used to disable the RX checksum
 *			 feature for a device. The stack will accept receive
 *			 checksum indication in packets received on a device
 *			 regardless of whether NETIF_F_RXCSUM is set.
 *
 * B. Checksumming of received packets by device. Indication of checksum
 *    verification is in set skb->ip_summed. Possible values are:
89 90 91
 *
 * CHECKSUM_NONE:
 *
92
 *   Device did not checksum this packet e.g. due to lack of capabilities.
93 94 95 96 97 98 99
 *   The packet contains full (though not verified) checksum in packet but
 *   not in skb->csum. Thus, skb->csum is undefined in this case.
 *
 * CHECKSUM_UNNECESSARY:
 *
 *   The hardware you're dealing with doesn't calculate the full checksum
 *   (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
100 101
 *   for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
 *   if their checksums are okay. skb->csum is still undefined in this case
102 103
 *   though. A driver or device must never modify the checksum field in the
 *   packet even if checksum is verified.
104 105 106 107 108 109 110 111
 *
 *   CHECKSUM_UNNECESSARY is applicable to following protocols:
 *     TCP: IPv6 and IPv4.
 *     UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
 *       zero UDP checksum for either IPv4 or IPv6, the networking stack
 *       may perform further validation in this case.
 *     GRE: only if the checksum is present in the header.
 *     SCTP: indicates the CRC in SCTP header has been validated.
112
 *     FCOE: indicates the CRC in FC frame has been validated.
113 114 115 116 117 118 119 120 121 122
 *
 *   skb->csum_level indicates the number of consecutive checksums found in
 *   the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
 *   For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
 *   and a device is able to verify the checksums for UDP (possibly zero),
 *   GRE (checksum flag is set), and TCP-- skb->csum_level would be set to
 *   two. If the device were only able to verify the UDP checksum and not
 *   GRE, either because it doesn't support GRE checksum of because GRE
 *   checksum is bad, skb->csum_level would be set to zero (TCP checksum is
 *   not considered in this case).
123 124 125 126 127 128 129
 *
 * CHECKSUM_COMPLETE:
 *
 *   This is the most generic way. The device supplied checksum of the _whole_
 *   packet as seen by netif_rx() and fills out in skb->csum. Meaning, the
 *   hardware doesn't need to parse L3/L4 headers to implement this.
 *
130 131 132 133
 *   Notes:
 *   - Even if device supports only some protocols, but is able to produce
 *     skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
 *   - CHECKSUM_COMPLETE is not applicable to SCTP and FCoE protocols.
134 135 136
 *
 * CHECKSUM_PARTIAL:
 *
137 138
 *   A checksum is set up to be offloaded to a device as described in the
 *   output description for CHECKSUM_PARTIAL. This may occur on a packet
139
 *   received directly from another Linux OS, e.g., a virtualized Linux kernel
140 141 142 143 144 145
 *   on the same host, or it may be set in the input path in GRO or remote
 *   checksum offload. For the purposes of checksum verification, the checksum
 *   referred to by skb->csum_start + skb->csum_offset and any preceding
 *   checksums in the packet are considered verified. Any checksums in the
 *   packet that are after the checksum being offloaded are not considered to
 *   be verified.
146
 *
147 148
 * C. Checksumming on transmit for non-GSO. The stack requests checksum offload
 *    in the skb->ip_summed for a packet. Values are:
149 150 151
 *
 * CHECKSUM_PARTIAL:
 *
152
 *   The driver is required to checksum the packet as seen by hard_start_xmit()
153
 *   from skb->csum_start up to the end, and to record/write the checksum at
154 155 156 157 158 159 160 161 162 163 164 165 166 167
 *   offset skb->csum_start + skb->csum_offset. A driver may verify that the
 *   csum_start and csum_offset values are valid values given the length and
 *   offset of the packet, however they should not attempt to validate that the
 *   checksum refers to a legitimate transport layer checksum-- it is the
 *   purview of the stack to validate that csum_start and csum_offset are set
 *   correctly.
 *
 *   When the stack requests checksum offload for a packet, the driver MUST
 *   ensure that the checksum is set correctly. A driver can either offload the
 *   checksum calculation to the device, or call skb_checksum_help (in the case
 *   that the device does not support offload for a particular checksum).
 *
 *   NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of
 *   NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate
168 169 170 171 172
 *   checksum offload capability.
 *   skb_csum_hwoffload_help() can be called to resolve CHECKSUM_PARTIAL based
 *   on network device checksumming capabilities: if a packet does not match
 *   them, skb_checksum_help or skb_crc32c_help (depending on the value of
 *   csum_not_inet, see item D.) is called to resolve the checksum.
173
 *
174
 * CHECKSUM_NONE:
175
 *
176 177
 *   The skb was already checksummed by the protocol, or a checksum is not
 *   required.
178 179 180
 *
 * CHECKSUM_UNNECESSARY:
 *
181 182
 *   This has the same meaning on as CHECKSUM_NONE for checksum offload on
 *   output.
183
 *
184 185 186 187 188 189 190 191
 * CHECKSUM_COMPLETE:
 *   Not used in checksum output. If a driver observes a packet with this value
 *   set in skbuff, if should treat as CHECKSUM_NONE being set.
 *
 * D. Non-IP checksum (CRC) offloads
 *
 *   NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
 *     offloading the SCTP CRC in a packet. To perform this offload the stack
192 193 194 195 196 197 198
 *     will set set csum_start and csum_offset accordingly, set ip_summed to
 *     CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in
 *     the skbuff that the CHECKSUM_PARTIAL refers to CRC32c.
 *     A driver that supports both IP checksum offload and SCTP CRC32c offload
 *     must verify which offload is configured for a packet by testing the
 *     value of skb->csum_not_inet; skb_crc32c_csum_help is provided to resolve
 *     CHECKSUM_PARTIAL on skbs where csum_not_inet is set to 1.
199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216
 *
 *   NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
 *     offloading the FCOE CRC in a packet. To perform this offload the stack
 *     will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
 *     accordingly. Note the there is no indication in the skbuff that the
 *     CHECKSUM_PARTIAL refers to an FCOE checksum, a driver that supports
 *     both IP checksum offload and FCOE CRC offload must verify which offload
 *     is configured for a packet presumably by inspecting packet headers.
 *
 * E. Checksumming on output with GSO.
 *
 * In the case of a GSO packet (skb_is_gso(skb) is true), checksum offload
 * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the
 * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as
 * part of the GSO operation is implied. If a checksum is being offloaded
 * with GSO then ip_summed is CHECKSUM_PARTIAL, csum_start and csum_offset
 * are set to refer to the outermost checksum being offload (two offloaded
 * checksums are possible with UDP encapsulation).
217 218
 */

219
/* Don't change this without changing skb_csum_unnecessary! */
220 221 222 223
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3
L
Linus Torvalds 已提交
224

225 226 227
/* Maximum value in skb->csum_level */
#define SKB_MAX_CSUM_LEVEL	3

228
#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
229
#define SKB_WITH_OVERHEAD(X)	\
230
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
231 232
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
L
Linus Torvalds 已提交
233 234 235
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

E
Eric Dumazet 已提交
236 237 238 239 240
/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

L
Linus Torvalds 已提交
241
struct net_device;
242
struct scatterlist;
J
Jens Axboe 已提交
243
struct pipe_inode_info;
H
Herbert Xu 已提交
244
struct iov_iter;
245
struct napi_struct;
246 247
struct bpf_prog;
union bpf_attr;
248
struct skb_ext;
L
Linus Torvalds 已提交
249

250
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
L
Linus Torvalds 已提交
251
struct nf_bridge_info {
252 253 254 255
	enum {
		BRNF_PROTO_UNCHANGED,
		BRNF_PROTO_8021Q,
		BRNF_PROTO_PPPOE
256
	} orig_proto:8;
257 258 259
	u8			pkt_otherhost:1;
	u8			in_prerouting:1;
	u8			bridged_dnat:1;
260
	__u16			frag_max_size;
261
	struct net_device	*physindev;
262 263 264

	/* always valid & non-NULL from FORWARD on, for physdev match */
	struct net_device	*physoutdev;
265
	union {
266
		/* prerouting: detect dnat in orig/reply direction */
267 268
		__be32          ipv4_daddr;
		struct in6_addr ipv6_daddr;
269 270 271 272 273 274

		/* after prerouting + nat detected: store original source
		 * mac since neigh resolution overwrites it, only used while
		 * skb is out in neigh layer.
		 */
		char neigh_header[8];
275
	};
L
Linus Torvalds 已提交
276 277 278
};
#endif

279 280 281 282 283 284 285 286 287 288
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
/* Chain in tc_skb_ext will be used to share the tc chain with
 * ovs recirc_id. It will be set to the current chain by tc
 * and read by ovs to recirc_id.
 */
struct tc_skb_ext {
	__u32 chain;
};
#endif

L
Linus Torvalds 已提交
289 290 291 292 293 294 295 296 297 298 299
struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

300 301 302 303 304 305
/* To allow 64K frame to be packed as single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
306
 */
307
#if (65536/PAGE_SIZE + 1) < 16
308
#define MAX_SKB_FRAGS 16UL
309
#else
310
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
311
#endif
H
Hans Westgaard Ry 已提交
312
extern int sysctl_max_skb_frags;
L
Linus Torvalds 已提交
313

314 315 316 317 318
/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
 * segment using its current segmentation instead.
 */
#define GSO_BY_FRAGS	0xFFFF

319
typedef struct bio_vec skb_frag_t;
L
Linus Torvalds 已提交
320

321
/**
322
 * skb_frag_size() - Returns the size of a skb fragment
323 324
 * @frag: skb fragment
 */
E
Eric Dumazet 已提交
325 326
static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
327
	return frag->bv_len;
E
Eric Dumazet 已提交
328 329
}

330
/**
331
 * skb_frag_size_set() - Sets the size of a skb fragment
332 333 334
 * @frag: skb fragment
 * @size: size of fragment
 */
E
Eric Dumazet 已提交
335 336
static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
337
	frag->bv_len = size;
E
Eric Dumazet 已提交
338 339
}

340
/**
341
 * skb_frag_size_add() - Increments the size of a skb fragment by @delta
342 343 344
 * @frag: skb fragment
 * @delta: value to add
 */
E
Eric Dumazet 已提交
345 346
static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
347
	frag->bv_len += delta;
E
Eric Dumazet 已提交
348 349
}

350
/**
351
 * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta
352 353 354
 * @frag: skb fragment
 * @delta: value to subtract
 */
E
Eric Dumazet 已提交
355 356
static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
357
	frag->bv_len -= delta;
E
Eric Dumazet 已提交
358 359
}

360 361 362 363
/**
 * skb_frag_must_loop - Test if %p is a high memory page
 * @p: fragment's page
 */
364 365 366 367 368 369 370 371 372 373 374 375 376
static inline bool skb_frag_must_loop(struct page *p)
{
#if defined(CONFIG_HIGHMEM)
	if (PageHighMem(p))
		return true;
#endif
	return false;
}

/**
 *	skb_frag_foreach_page - loop over pages in a fragment
 *
 *	@f:		skb frag to operate on
377
 *	@f_off:		offset from start of f->bv_page
378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399
 *	@f_len:		length from f_off to loop over
 *	@p:		(temp var) current page
 *	@p_off:		(temp var) offset from start of current page,
 *	                           non-zero only on first page.
 *	@p_len:		(temp var) length in current page,
 *				   < PAGE_SIZE only on first and last page.
 *	@copied:	(temp var) length so far, excluding current p_len.
 *
 *	A fragment can hold a compound page, in which case per-page
 *	operations, notably kmap_atomic, must be called for each
 *	regular page.
 */
#define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied)	\
	for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT),		\
	     p_off = (f_off) & (PAGE_SIZE - 1),				\
	     p_len = skb_frag_must_loop(p) ?				\
	     min_t(u32, f_len, PAGE_SIZE - p_off) : f_len,		\
	     copied = 0;						\
	     copied < f_len;						\
	     copied += p_len, p++, p_off = 0,				\
	     p_len = min_t(u32, f_len - copied, PAGE_SIZE))		\

400 401 402
#define HAVE_HW_TIME_STAMP

/**
403
 * struct skb_shared_hwtstamps - hardware time stamps
404 405 406 407
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 *
 * Software time stamps generated by ktime_get_real() are stored in
408
 * skb->tstamp.
409 410 411 412 413 414 415 416 417 418 419
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
};

420 421 422 423 424
/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

425
	/* generate software time stamp when queueing packet to NIC */
426 427 428 429 430
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

431
	/* device driver supports TX zero-copy buffers */
E
Eric Dumazet 已提交
432
	SKBTX_DEV_ZEROCOPY = 1 << 3,
433 434

	/* generate wifi status information (where possible) */
E
Eric Dumazet 已提交
435
	SKBTX_WIFI_STATUS = 1 << 4,
436 437 438 439 440 441 442

	/* This indicates at least one fragment might be overwritten
	 * (as in vmsplice(), sendfile() ...)
	 * If we need to compute a TX checksum, we'll need to copy
	 * all frags to avoid possible bad checksum
	 */
	SKBTX_SHARED_FRAG = 1 << 5,
443 444 445

	/* generate software time stamp when entering packet scheduling */
	SKBTX_SCHED_TSTAMP = 1 << 6,
446 447
};

W
Willem de Bruijn 已提交
448
#define SKBTX_ZEROCOPY_FRAG	(SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG)
449
#define SKBTX_ANY_SW_TSTAMP	(SKBTX_SW_TSTAMP    | \
450
				 SKBTX_SCHED_TSTAMP)
451 452
#define SKBTX_ANY_TSTAMP	(SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)

453 454 455
/*
 * The callback notifies userspace to release buffers when skb DMA is done in
 * lower device, the skb last reference should be 0 when calling this.
456 457
 * The zerocopy_success argument is true if zero copy transmit occurred,
 * false on data copy or out of memory error caused by data copy attempt.
458 459
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
460 461
 */
struct ubuf_info {
462
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
463 464 465 466 467 468 469 470 471 472 473 474
	union {
		struct {
			unsigned long desc;
			void *ctx;
		};
		struct {
			u32 id;
			u16 len;
			u16 zerocopy:1;
			u32 bytelen;
		};
	};
475
	refcount_t refcnt;
476 477 478 479 480

	struct mmpin {
		struct user_struct *user;
		unsigned int num_pg;
	} mmp;
481 482
};

W
Willem de Bruijn 已提交
483 484
#define skb_uarg(SKB)	((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))

485 486 487
int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);

W
Willem de Bruijn 已提交
488
struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
489 490
struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
					struct ubuf_info *uarg);
W
Willem de Bruijn 已提交
491 492 493

static inline void sock_zerocopy_get(struct ubuf_info *uarg)
{
494
	refcount_inc(&uarg->refcnt);
W
Willem de Bruijn 已提交
495 496 497
}

void sock_zerocopy_put(struct ubuf_info *uarg);
498
void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
W
Willem de Bruijn 已提交
499 500 501

void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);

W
Willem de Bruijn 已提交
502
int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
W
Willem de Bruijn 已提交
503 504 505 506
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg);

L
Linus Torvalds 已提交
507 508 509 510
/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
511 512 513
	__u8		__unused;
	__u8		meta_len;
	__u8		nr_frags;
514
	__u8		tx_flags;
515 516 517
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
L
Linus Torvalds 已提交
518
	struct sk_buff	*frag_list;
519
	struct skb_shared_hwtstamps hwtstamps;
520
	unsigned int	gso_type;
521
	u32		tskey;
E
Eric Dumazet 已提交
522 523 524 525 526 527

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

J
Johann Baudy 已提交
528 529 530
	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void *		destructor_arg;
531

532 533
	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
L
Linus Torvalds 已提交
534 535 536 537
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
538 539
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
L
Linus Torvalds 已提交
540 541 542 543 544 545 546 547 548 549
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)

550 551

enum {
552 553 554
	SKB_FCLONE_UNAVAILABLE,	/* skb has no fclone (from head_cache) */
	SKB_FCLONE_ORIG,	/* orig skb (from fclone_cache) */
	SKB_FCLONE_CLONE,	/* companion fclone skb (from fclone_cache) */
555 556
};

557 558
enum {
	SKB_GSO_TCPV4 = 1 << 0,
559 560

	/* This indicates the skb is from an untrusted source. */
561
	SKB_GSO_DODGY = 1 << 1,
M
Michael Chan 已提交
562 563

	/* This indicates the tcp segment has CWR set. */
564
	SKB_GSO_TCP_ECN = 1 << 2,
H
Herbert Xu 已提交
565

566
	SKB_GSO_TCP_FIXEDID = 1 << 3,
567

568
	SKB_GSO_TCPV6 = 1 << 4,
569

570
	SKB_GSO_FCOE = 1 << 5,
571

572
	SKB_GSO_GRE = 1 << 6,
S
Simon Horman 已提交
573

574
	SKB_GSO_GRE_CSUM = 1 << 7,
E
Eric Dumazet 已提交
575

576
	SKB_GSO_IPXIP4 = 1 << 8,
E
Eric Dumazet 已提交
577

578
	SKB_GSO_IPXIP6 = 1 << 9,
579

580
	SKB_GSO_UDP_TUNNEL = 1 << 10,
T
Tom Herbert 已提交
581

582
	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
583

584
	SKB_GSO_PARTIAL = 1 << 12,
585

586
	SKB_GSO_TUNNEL_REMCSUM = 1 << 13,
M
Marcelo Ricardo Leitner 已提交
587

588
	SKB_GSO_SCTP = 1 << 14,
S
Steffen Klassert 已提交
589

590
	SKB_GSO_ESP = 1 << 15,
591 592

	SKB_GSO_UDP = 1 << 16,
W
Willem de Bruijn 已提交
593 594

	SKB_GSO_UDP_L4 = 1 << 17,
595 596
};

597 598 599 600 601 602 603 604 605 606
#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

607
/**
L
Linus Torvalds 已提交
608 609 610
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
611
 *	@tstamp: Time we arrived/left
E
Eric Dumazet 已提交
612
 *	@rbnode: RB tree node, alternative to next/prev for netem/tcp
613
 *	@sk: Socket we are owned by
L
Linus Torvalds 已提交
614
 *	@dev: Device we arrived on/are leaving by
615
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
E
Eric Dumazet 已提交
616
 *	@_skb_refdst: destination entry (with norefcount bit)
617
 *	@sp: the security path, used for xfrm
L
Linus Torvalds 已提交
618 619 620
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
621
 *	@hdr_len: writable header length of cloned skb
622 623 624
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
625
 *	@priority: Packet queueing priority
W
WANG Cong 已提交
626
 *	@ignore_df: allow local fragmentation
L
Linus Torvalds 已提交
627
 *	@cloned: Head may be cloned (check refcnt to be sure)
628
 *	@ip_summed: Driver fed us an IP checksum
L
Linus Torvalds 已提交
629 630
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
631 632
 *	@fclone: skbuff clone status
 *	@ipvs_property: skbuff is owned by ipvs
633 634
 *	@offload_fwd_mark: Packet was L2-forwarded in hardware
 *	@offload_l3_fwd_mark: Packet was L3-forwarded in hardware
635
 *	@tc_skip_classify: do not classify packet. set by IFB device
636
 *	@tc_at_ingress: used within tc_classify to distinguish in/egress
637 638
 *	@tc_redirected: packet was redirected by a tc action
 *	@tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect
639 640
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
641
 *	@nf_trace: netfilter packet trace flag
642 643
 *	@protocol: Packet protocol from driver
 *	@destructor: Destruct function
644
 *	@tcp_tsorted_anchor: list structure for TCP (tp->tsorted_sent_queue)
645
 *	@_nfct: Associated connection, if any (with nfctinfo bits)
L
Linus Torvalds 已提交
646
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
647
 *	@skb_iif: ifindex of device we arrived on
L
Linus Torvalds 已提交
648
 *	@tc_index: Traffic control index
649
 *	@hash: the packet hash
650
 *	@queue_mapping: Queue mapping for multiqueue devices
651
 *	@pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
652
 *	@active_extensions: active extensions (skb_ext_id types)
653
 *	@ndisc_nodetype: router type (from link layer)
654
 *	@ooo_okay: allow the mapping of a socket to a queue to be changed
655
 *	@l4_hash: indicate hash is a canonical 4-tuple hash over transport
656
 *		ports.
657
 *	@sw_hash: indicates hash was computed in software stack
658 659
 *	@wifi_acked_valid: wifi_acked was set
 *	@wifi_acked: whether frame was acked on wifi or not
660
 *	@no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
661
 *	@csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL
662
 *	@dst_pending_confirm: need to confirm neighbour
663
 *	@decrypted: Decrypted SKB
664
 *	@napi_id: id of the NAPI struct this skb came from
665
 *	@secmark: security marking
666
 *	@mark: Generic packet mark
667
 *	@vlan_proto: vlan encapsulation protocol
668
 *	@vlan_tci: vlan tag control information
S
Simon Horman 已提交
669
 *	@inner_protocol: Protocol (encapsulation)
670 671
 *	@inner_transport_header: Inner transport layer header (encapsulation)
 *	@inner_network_header: Network layer header (encapsulation)
672
 *	@inner_mac_header: Link layer header (encapsulation)
673 674 675 676 677 678 679 680 681
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
682
 *	@extensions: allocated extensions, valid if active_extensions is nonzero
L
Linus Torvalds 已提交
683 684 685
 */

struct sk_buff {
686
	union {
E
Eric Dumazet 已提交
687 688 689 690 691 692
		struct {
			/* These two members must be first. */
			struct sk_buff		*next;
			struct sk_buff		*prev;

			union {
E
Eric Dumazet 已提交
693 694 695 696 697 698
				struct net_device	*dev;
				/* Some protocols might use this space to store information,
				 * while device pointer would be NULL.
				 * UDP receive path is one user.
				 */
				unsigned long		dev_scratch;
E
Eric Dumazet 已提交
699 700
			};
		};
701
		struct rb_node		rbnode; /* used in netem, ip4 defrag, and tcp stack */
702
		struct list_head	list;
703
	};
704 705 706 707 708

	union {
		struct sock		*sk;
		int			ip_defrag_offset;
	};
L
Linus Torvalds 已提交
709

710
	union {
E
Eric Dumazet 已提交
711
		ktime_t		tstamp;
712
		u64		skb_mstamp_ns; /* earliest departure time */
713
	};
L
Linus Torvalds 已提交
714 715 716 717 718 719
	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
720
	char			cb[48] __aligned(8);
L
Linus Torvalds 已提交
721

722 723 724 725 726 727 728 729
	union {
		struct {
			unsigned long	_skb_refdst;
			void		(*destructor)(struct sk_buff *skb);
		};
		struct list_head	tcp_tsorted_anchor;
	};

730
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
731
	unsigned long		 _nfct;
732
#endif
L
Linus Torvalds 已提交
733
	unsigned int		len,
734 735 736
				data_len;
	__u16			mac_len,
				hdr_len;
737 738 739 740 741

	/* Following fields are _not_ copied in __copy_skb_header()
	 * Note that queue_mapping is here mostly to fill a hole.
	 */
	__u16			queue_mapping;
742 743 744 745 746 747 748 749 750 751

/* if you move cloned around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define CLONED_MASK	(1 << 7)
#else
#define CLONED_MASK	1
#endif
#define CLONED_OFFSET()		offsetof(struct sk_buff, __cloned_offset)

	__u8			__cloned_offset[0];
752
	__u8			cloned:1,
753
				nohdr:1,
754
				fclone:2,
755
				peeked:1,
756
				head_frag:1,
757
				pfmemalloc:1;
758 759 760
#ifdef CONFIG_SKB_EXTENSIONS
	__u8			active_extensions;
#endif
761 762 763
	/* fields enclosed in headers_start/headers_end are copied
	 * using a single memcpy() in __copy_skb_header()
	 */
764
	/* private: */
765
	__u32			headers_start[0];
766
	/* public: */
767

768 769 770 771 772
/* if you move pkt_type around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_TYPE_MAX	(7 << 5)
#else
#define PKT_TYPE_MAX	7
L
Linus Torvalds 已提交
773
#endif
774
#define PKT_TYPE_OFFSET()	offsetof(struct sk_buff, __pkt_type_offset)
775

776
	__u8			__pkt_type_offset[0];
777 778 779 780
	__u8			pkt_type:3;
	__u8			ignore_df:1;
	__u8			nf_trace:1;
	__u8			ip_summed:2;
781
	__u8			ooo_okay:1;
782

783
	__u8			l4_hash:1;
784
	__u8			sw_hash:1;
785 786
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;
787
	__u8			no_fcs:1;
788
	/* Indicates the inner headers are valid in the skbuff. */
789
	__u8			encapsulation:1;
790
	__u8			encap_hdr_csum:1;
791
	__u8			csum_valid:1;
792

M
Michał Mirosław 已提交
793 794 795 796 797 798 799 800
#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_VLAN_PRESENT_BIT	7
#else
#define PKT_VLAN_PRESENT_BIT	0
#endif
#define PKT_VLAN_PRESENT_OFFSET()	offsetof(struct sk_buff, __pkt_vlan_present_offset)
	__u8			__pkt_vlan_present_offset[0];
	__u8			vlan_present:1;
801
	__u8			csum_complete_sw:1;
802
	__u8			csum_level:2;
803
	__u8			csum_not_inet:1;
804
	__u8			dst_pending_confirm:1;
805 806 807
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
808

M
Michał Mirosław 已提交
809
	__u8			ipvs_property:1;
T
Tom Herbert 已提交
810
	__u8			inner_protocol_type:1;
811
	__u8			remcsum_offload:1;
812 813
#ifdef CONFIG_NET_SWITCHDEV
	__u8			offload_fwd_mark:1;
814
	__u8			offload_l3_fwd_mark:1;
815
#endif
816 817
#ifdef CONFIG_NET_CLS_ACT
	__u8			tc_skip_classify:1;
818
	__u8			tc_at_ingress:1;
819 820
	__u8			tc_redirected:1;
	__u8			tc_from_ingress:1;
821
#endif
822 823 824
#ifdef CONFIG_TLS_DEVICE
	__u8			decrypted:1;
#endif
825 826 827 828

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#endif
829

830 831 832 833 834 835 836 837 838 839 840 841
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	int			skb_iif;
	__u32			hash;
	__be16			vlan_proto;
	__u16			vlan_tci;
E
Eric Dumazet 已提交
842 843 844 845 846
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
	union {
		unsigned int	napi_id;
		unsigned int	sender_cpu;
	};
847
#endif
848
#ifdef CONFIG_NETWORK_SECMARK
849
	__u32		secmark;
850 851
#endif

852 853
	union {
		__u32		mark;
E
Eric Dumazet 已提交
854
		__u32		reserved_tailroom;
855
	};
L
Linus Torvalds 已提交
856

T
Tom Herbert 已提交
857 858 859 860 861
	union {
		__be16		inner_protocol;
		__u8		inner_ipproto;
	};

862 863 864
	__u16			inner_transport_header;
	__u16			inner_network_header;
	__u16			inner_mac_header;
865 866

	__be16			protocol;
867 868 869
	__u16			transport_header;
	__u16			network_header;
	__u16			mac_header;
870

871
	/* private: */
872
	__u32			headers_end[0];
873
	/* public: */
874

L
Linus Torvalds 已提交
875
	/* These elements must be at the end, see alloc_skb() for details.  */
876
	sk_buff_data_t		tail;
877
	sk_buff_data_t		end;
L
Linus Torvalds 已提交
878
	unsigned char		*head,
879
				*data;
880
	unsigned int		truesize;
881
	refcount_t		users;
882 883 884 885 886

#ifdef CONFIG_SKB_EXTENSIONS
	/* only useable after checking ->active_extensions != 0 */
	struct skb_ext		*extensions;
#endif
L
Linus Torvalds 已提交
887 888 889 890 891 892 893
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */

894 895
#define SKB_ALLOC_FCLONE	0x01
#define SKB_ALLOC_RX		0x02
896
#define SKB_ALLOC_NAPI		0x04
897

898 899 900 901
/**
 * skb_pfmemalloc - Test if the skb was allocated from PFMEMALLOC reserves
 * @skb: buffer
 */
902 903 904 905 906
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
	return unlikely(skb->pfmemalloc);
}

E
Eric Dumazet 已提交
907 908 909 910 911 912 913 914 915 916 917 918 919
/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
E
Eric Dumazet 已提交
920 921
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
922
	/* If refdst was not refcounted, check we still are in a
E
Eric Dumazet 已提交
923 924 925 926 927 928
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
E
Eric Dumazet 已提交
929 930
}

E
Eric Dumazet 已提交
931 932 933 934 935 936 937 938
/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
E
Eric Dumazet 已提交
939 940
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
E
Eric Dumazet 已提交
941 942 943
	skb->_skb_refdst = (unsigned long)dst;
}

944 945 946 947 948 949 950 951 952 953 954 955
/**
 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * If dst entry is cached, we do not take reference and dst_release
 * will be avoided by refdst_drop. If dst entry is not cached, we take
 * reference, so that last dst_release can destroy the dst immediately.
 */
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
956 957
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
958
}
E
Eric Dumazet 已提交
959 960

/**
L
Lucas De Marchi 已提交
961
 * skb_dst_is_noref - Test if skb dst isn't refcounted
E
Eric Dumazet 已提交
962 963 964 965 966
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
E
Eric Dumazet 已提交
967 968
}

969 970 971 972
/**
 * skb_rtable - Returns the skb &rtable
 * @skb: buffer
 */
E
Eric Dumazet 已提交
973 974
static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
E
Eric Dumazet 已提交
975
	return (struct rtable *)skb_dst(skb);
E
Eric Dumazet 已提交
976 977
}

978 979 980 981 982 983 984 985 986
/* For mangling skb->pkt_type from user space side from applications
 * such as nft, tc, etc, we only allow a conservative subset of
 * possible pkt_types to be set.
*/
static inline bool skb_pkt_type_ok(u32 ptype)
{
	return ptype <= PACKET_OTHERHOST;
}

987 988 989 990
/**
 * skb_napi_id - Returns the skb's NAPI id
 * @skb: buffer
 */
991 992 993 994 995 996 997 998 999
static inline unsigned int skb_napi_id(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	return skb->napi_id;
#else
	return 0;
#endif
}

1000 1001 1002 1003 1004 1005
/**
 * skb_unref - decrement the skb's reference count
 * @skb: buffer
 *
 * Returns true if we can free the skb.
 */
1006 1007 1008 1009
static inline bool skb_unref(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return false;
1010
	if (likely(refcount_read(&skb->users) == 1))
1011
		smp_rmb();
1012
	else if (likely(!refcount_dec_and_test(&skb->users)))
1013 1014 1015 1016 1017
		return false;

	return true;
}

P
Paolo Abeni 已提交
1018
void skb_release_head_state(struct sk_buff *skb);
1019 1020
void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
1021
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
1022 1023
void skb_tx_error(struct sk_buff *skb);
void consume_skb(struct sk_buff *skb);
1024
void __consume_stateless_skb(struct sk_buff *skb);
1025
void  __kfree_skb(struct sk_buff *skb);
1026
extern struct kmem_cache *skbuff_head_cache;
E
Eric Dumazet 已提交
1027

1028 1029 1030
void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize);
E
Eric Dumazet 已提交
1031

1032 1033
struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
			    int node);
E
Eric Dumazet 已提交
1034
struct sk_buff *__build_skb(void *data, unsigned int frag_size);
1035
struct sk_buff *build_skb(void *data, unsigned int frag_size);
1036 1037
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size);
1038 1039 1040 1041 1042 1043 1044 1045

/**
 * alloc_skb - allocate a network buffer
 * @size: size to allocate
 * @priority: allocation mask
 *
 * This function is a convenient wrapper around __alloc_skb().
 */
1046
static inline struct sk_buff *alloc_skb(unsigned int size,
A
Al Viro 已提交
1047
					gfp_t priority)
1048
{
E
Eric Dumazet 已提交
1049
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
1050 1051
}

1052 1053 1054 1055 1056
struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask);
1057
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first);
1058

1059 1060 1061 1062 1063 1064
/* Layout of fast clones : [skb1][skb2][fclone_ref] */
struct sk_buff_fclones {
	struct sk_buff	skb1;

	struct sk_buff	skb2;

1065
	refcount_t	fclone_ref;
1066 1067 1068 1069
};

/**
 *	skb_fclone_busy - check if fclone is busy
1070
 *	@sk: socket
1071 1072
 *	@skb: buffer
 *
M
Masanari Iida 已提交
1073
 * Returns true if skb is a fast clone, and its clone is not freed.
1074 1075
 * Some drivers call skb_orphan() in their ndo_start_xmit(),
 * so we also check that this didnt happen.
1076
 */
1077 1078
static inline bool skb_fclone_busy(const struct sock *sk,
				   const struct sk_buff *skb)
1079 1080 1081 1082 1083 1084
{
	const struct sk_buff_fclones *fclones;

	fclones = container_of(skb, struct sk_buff_fclones, skb1);

	return skb->fclone == SKB_FCLONE_ORIG &&
1085
	       refcount_read(&fclones->fclone_ref) > 1 &&
1086
	       fclones->skb2.sk == sk;
1087 1088
}

1089 1090 1091 1092 1093 1094 1095
/**
 * alloc_skb_fclone - allocate a network buffer from fclone cache
 * @size: size to allocate
 * @priority: allocation mask
 *
 * This function is a convenient wrapper around __alloc_skb().
 */
1096
static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
A
Al Viro 已提交
1097
					       gfp_t priority)
1098
{
1099
	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
1100 1101
}

1102
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
1103
void skb_headers_offset_update(struct sk_buff *skb, int off);
1104 1105
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
1106
void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
1107
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
1108 1109 1110 1111 1112 1113 1114
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone);
static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
					  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
}
1115 1116 1117 1118 1119 1120

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
				     unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
				int newtailroom, gfp_t priority);
1121 1122 1123 1124
int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
				     int offset, int len);
int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
			      int offset, int len);
1125
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142
int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */
static inline int skb_pad(struct sk_buff *skb, int pad)
{
	return __skb_pad(skb, pad, true);
}
1143
#define dev_kfree_skb(a)	consume_skb(a)
L
Linus Torvalds 已提交
1144

1145 1146 1147
int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
			 int offset, size_t size);

E
Eric Dumazet 已提交
1148
struct skb_seq_state {
1149 1150 1151 1152 1153 1154 1155 1156 1157
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

1158 1159 1160 1161 1162
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st);
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);
1163

1164
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
1165
			   unsigned int to, struct ts_config *config);
1166

T
Tom Herbert 已提交
1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199
/*
 * Packet hash types specify the type of hash in skb_set_hash.
 *
 * Hash types refer to the protocol layer addresses which are used to
 * construct a packet's hash. The hashes are used to differentiate or identify
 * flows of the protocol layer for the hash type. Hash types are either
 * layer-2 (L2), layer-3 (L3), or layer-4 (L4).
 *
 * Properties of hashes:
 *
 * 1) Two packets in different flows have different hash values
 * 2) Two packets in the same flow should have the same hash value
 *
 * A hash at a higher layer is considered to be more specific. A driver should
 * set the most specific hash possible.
 *
 * A driver cannot indicate a more specific hash than the layer at which a hash
 * was computed. For instance an L3 hash cannot be set as an L4 hash.
 *
 * A driver may indicate a hash level which is less specific than the
 * actual layer the hash was computed on. For instance, a hash computed
 * at L4 may be considered an L3 hash. This should only be done if the
 * driver can't unambiguously determine that the HW computed the hash at
 * the higher layer. Note that the "should" in the second property above
 * permits this.
 */
enum pkt_hash_types {
	PKT_HASH_TYPE_NONE,	/* Undefined type */
	PKT_HASH_TYPE_L2,	/* Input: src_MAC, dest_MAC */
	PKT_HASH_TYPE_L3,	/* Input: src_IP, dst_IP */
	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
};

1200
static inline void skb_clear_hash(struct sk_buff *skb)
T
Tom Herbert 已提交
1201
{
1202
	skb->hash = 0;
1203
	skb->sw_hash = 0;
1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217
	skb->l4_hash = 0;
}

static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
	if (!skb->l4_hash)
		skb_clear_hash(skb);
}

static inline void
__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
{
	skb->l4_hash = is_l4;
	skb->sw_hash = is_sw;
1218
	skb->hash = hash;
T
Tom Herbert 已提交
1219 1220
}

1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233
static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{
	/* Used by drivers to set hash from HW */
	__skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
}

static inline void
__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
{
	__skb_set_hash(skb, hash, true, is_l4);
}

1234
void __skb_get_hash(struct sk_buff *skb);
1235
u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
1236 1237
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
1238
		   const struct flow_keys_basic *keys, int hlen);
1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen_proto);

static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
					int thoff, u8 ip_proto)
{
	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count);

1252
#ifdef CONFIG_NET
1253 1254
int skb_flow_dissector_prog_query(const union bpf_attr *attr,
				  union bpf_attr __user *uattr);
1255 1256 1257 1258
int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
				       struct bpf_prog *prog);

int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr);
1259
#else
1260 1261 1262 1263 1264 1265
static inline int skb_flow_dissector_prog_query(const union bpf_attr *attr,
						union bpf_attr __user *uattr)
{
	return -EOPNOTSUPP;
}

1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276
static inline int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
						     struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}

static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}
#endif
1277

1278 1279
struct bpf_flow_dissector;
bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
1280
		      __be16 proto, int nhoff, int hlen, unsigned int flags);
1281

1282 1283
bool __skb_flow_dissect(const struct net *net,
			const struct sk_buff *skb,
1284 1285
			struct flow_dissector *flow_dissector,
			void *target_container,
1286 1287
			void *data, __be16 proto, int nhoff, int hlen,
			unsigned int flags);
1288 1289 1290

static inline bool skb_flow_dissect(const struct sk_buff *skb,
				    struct flow_dissector *flow_dissector,
1291
				    void *target_container, unsigned int flags)
1292
{
1293 1294
	return __skb_flow_dissect(NULL, skb, flow_dissector,
				  target_container, NULL, 0, 0, 0, flags);
1295 1296 1297
}

static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
1298 1299
					      struct flow_keys *flow,
					      unsigned int flags)
1300 1301
{
	memset(flow, 0, sizeof(*flow));
1302 1303
	return __skb_flow_dissect(NULL, skb, &flow_keys_dissector,
				  flow, NULL, 0, 0, 0, flags);
1304 1305
}

1306
static inline bool
1307 1308
skb_flow_dissect_flow_keys_basic(const struct net *net,
				 const struct sk_buff *skb,
1309 1310 1311
				 struct flow_keys_basic *flow, void *data,
				 __be16 proto, int nhoff, int hlen,
				 unsigned int flags)
1312 1313
{
	memset(flow, 0, sizeof(*flow));
1314
	return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
1315
				  data, proto, nhoff, hlen, flags);
1316 1317
}

1318 1319 1320 1321
void skb_flow_dissect_meta(const struct sk_buff *skb,
			   struct flow_dissector *flow_dissector,
			   void *target_container);

1322 1323 1324 1325 1326 1327 1328 1329 1330 1331
/* Gets a skb connection tracking info, ctinfo map should be a
 * a map of mapsize to translate enum ip_conntrack_info states
 * to user states.
 */
void
skb_flow_dissect_ct(const struct sk_buff *skb,
		    struct flow_dissector *flow_dissector,
		    void *target_container,
		    u16 *ctinfo_map,
		    size_t mapsize);
1332 1333 1334 1335 1336
void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
			     struct flow_dissector *flow_dissector,
			     void *target_container);

1337
static inline __u32 skb_get_hash(struct sk_buff *skb)
1338
{
1339
	if (!skb->l4_hash && !skb->sw_hash)
1340
		__skb_get_hash(skb);
1341

1342
	return skb->hash;
1343 1344
}

1345
static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
1346
{
1347 1348
	if (!skb->l4_hash && !skb->sw_hash) {
		struct flow_keys keys;
1349
		__u32 hash = __get_hash_from_flowi6(fl6, &keys);
1350

1351
		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1352
	}
1353 1354 1355 1356

	return skb->hash;
}

1357 1358
__u32 skb_get_hash_perturb(const struct sk_buff *skb,
			   const siphash_key_t *perturb);
T
Tom Herbert 已提交
1359

T
Tom Herbert 已提交
1360 1361
static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
1362
	return skb->hash;
T
Tom Herbert 已提交
1363 1364
}

1365 1366
static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
{
1367
	to->hash = from->hash;
1368
	to->sw_hash = from->sw_hash;
1369
	to->l4_hash = from->l4_hash;
1370 1371
};

1372 1373 1374 1375 1376 1377 1378 1379
static inline void skb_copy_decrypted(struct sk_buff *to,
				      const struct sk_buff *from)
{
#ifdef CONFIG_TLS_DEVICE
	to->decrypted = from->decrypted;
#endif
}

1380 1381 1382 1383 1384
#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}
1385 1386 1387 1388 1389

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
1390 1391 1392 1393 1394
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
1395 1396 1397 1398 1399

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
1400 1401
#endif

L
Linus Torvalds 已提交
1402
/* Internal */
1403
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))
L
Linus Torvalds 已提交
1404

1405 1406 1407 1408 1409
static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

W
Willem de Bruijn 已提交
1410 1411 1412 1413 1414 1415 1416
static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
{
	bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY;

	return is_zcopy ? skb_uarg(skb) : NULL;
}

1417 1418
static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
				 bool *have_ref)
W
Willem de Bruijn 已提交
1419 1420
{
	if (skb && uarg && !skb_zcopy(skb)) {
1421 1422 1423 1424
		if (unlikely(have_ref && *have_ref))
			*have_ref = false;
		else
			sock_zerocopy_get(uarg);
W
Willem de Bruijn 已提交
1425 1426 1427 1428 1429
		skb_shinfo(skb)->destructor_arg = uarg;
		skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
	}
}

1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445
static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
{
	skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
	skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
}

static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
{
	return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
}

static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
{
	return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
}

W
Willem de Bruijn 已提交
1446 1447 1448 1449 1450 1451
/* Release a reference on a zerocopy structure */
static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
{
	struct ubuf_info *uarg = skb_zcopy(skb);

	if (uarg) {
1452 1453 1454
		if (skb_zcopy_is_nouarg(skb)) {
			/* no notification callback */
		} else if (uarg->callback == sock_zerocopy_callback) {
1455 1456
			uarg->zerocopy = uarg->zerocopy && zerocopy;
			sock_zerocopy_put(uarg);
1457
		} else {
1458 1459 1460
			uarg->callback(uarg, zerocopy);
		}

W
Willem de Bruijn 已提交
1461 1462 1463 1464 1465 1466 1467 1468 1469 1470
		skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
	}
}

/* Abort a zerocopy operation and revert zckey on error in send syscall */
static inline void skb_zcopy_abort(struct sk_buff *skb)
{
	struct ubuf_info *uarg = skb_zcopy(skb);

	if (uarg) {
1471
		sock_zerocopy_put_abort(uarg, false);
W
Willem de Bruijn 已提交
1472 1473 1474 1475
		skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
	}
}

1476 1477 1478 1479 1480
static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
	skb->next = NULL;
}

1481 1482 1483 1484 1485 1486
static inline void skb_list_del_init(struct sk_buff *skb)
{
	__list_del_entry(&skb->list);
	skb_mark_not_on_list(skb);
}

L
Linus Torvalds 已提交
1487 1488 1489 1490 1491 1492 1493 1494
/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
1495
	return list->next == (const struct sk_buff *) list;
L
Linus Torvalds 已提交
1496 1497
}

E
Eric Dumazet 已提交
1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510
/**
 *	skb_queue_empty_lockless - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 *	This variant can be used in lockless contexts.
 */
static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
{
	return READ_ONCE(list->next) == (const struct sk_buff *) list;
}


D
David S. Miller 已提交
1511 1512 1513 1514 1515 1516 1517 1518 1519 1520
/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
1521
	return skb->next == (const struct sk_buff *) list;
D
David S. Miller 已提交
1522 1523
}

1524 1525 1526 1527 1528 1529 1530 1531 1532 1533
/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
1534
	return skb->prev == (const struct sk_buff *) list;
1535 1536
}

D
David S. Miller 已提交
1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554
/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572
/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}
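
/* Editor's usage sketch (hypothetical helper, not part of this header):
 * walking a queue with the cursor helpers above. Assumes the caller
 * already holds the queue lock or owns the list privately.
 */
static inline unsigned int example_queue_walk_len(const struct sk_buff_head *list)
{
	const struct sk_buff *skb = list->next;
	unsigned int n = 0;

	if (skb_queue_empty(list))
		return 0;

	for (;;) {
		n++;
		if (skb_queue_is_last(list, skb))
			break;
		skb = skb_queue_next(list, skb);
	}
	return n;
}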

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	refcount_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_header_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 *	__skb_header_release - release reference to header
 *	@skb: buffer to operate on
 */
static inline void __skb_header_release(struct sk_buff *skb)
{
	skb->nohdr = 1;
	atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
}


/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return refcount_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	called from interrupt context or with spinlocks held, @pri must
 *	be %GFP_ATOMIC.
 *
 *	NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt context @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);

		/* Free our shared copy */
		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
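
/* Editor's sketch of the classic copy-on-write pattern, using the two
 * helpers above; the function name is hypothetical. On failure the
 * helpers have already freed the skb, so the caller only sees %NULL.
 */
static inline struct sk_buff *example_skb_make_writable(struct sk_buff *skb)
{
	skb = skb_share_check(skb, GFP_ATOMIC);	/* drop other users */
	if (!skb)
		return NULL;
	return skb_unshare(skb, GFP_ATOMIC);	/* un-clone the data */
}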

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->next;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	__skb_peek - peek at the head of a non-empty &sk_buff_head
 *	@list_: list to peek at
 *
 *	Like skb_peek(), but the caller knows that the list is not empty.
 */
static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
{
	return list_->next;
}

/**
 *	skb_peek_next - peek skb following the given one from a queue
 *	@skb: skb to start from
 *	@list_: list to peek at
 *
 *	Returns %NULL when the end of the list is met or a pointer to the
 *	next element. The reference count is not incremented and the
 *	reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
		const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;

	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->prev;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows initializing the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	/* see skb_queue_empty_lockless() for the opposite READ_ONCE() */
	WRITE_ONCE(newsk->next, next);
	WRITE_ONCE(newsk->prev, prev);
	WRITE_ONCE(next->prev, newsk);
	WRITE_ONCE(prev->next, newsk);
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	WRITE_ONCE(first->prev, prev);
	WRITE_ONCE(prev->next, first);

	WRITE_ONCE(last->next, next);
	WRITE_ONCE(next->prev, last);
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}
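
/* Editor's sketch (hypothetical function): the usual pattern behind the
 * splice helpers above - move everything from a locked, shared queue to
 * a private on-stack list in O(1), then process it without the lock.
 */
static inline void example_drain_to_private(struct sk_buff_head *shared,
					    struct sk_buff_head *priv)
{
	__skb_queue_head_init(priv);

	spin_lock(&shared->lock);
	skb_queue_splice_tail_init(shared, priv);
	spin_unlock(&shared->lock);

	/* 'priv' can now be walked by this context without locking */
}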

/**
 *	__skb_queue_after - queue a buffer after another buffer in a list
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_tail(struct sk_buff_head *list,
				   struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known..
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	WRITE_ONCE(next->prev, prev);
	WRITE_ONCE(prev->next, next);
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
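
/* Editor's usage sketch (hypothetical): rotating the head of a privately
 * owned queue to its tail with the lock-free variants above. Safe only
 * because a single context owns the list, as the comments above require.
 */
static inline void example_rotate_queue(struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (skb)
		__skb_queue_tail(list, skb);
}
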
struct sk_buff *skb_dequeue(struct sk_buff_head *list);

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);


static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
{
	unsigned int i, len = 0;

	for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len;
}

static inline unsigned int skb_pagelen(const struct sk_buff *skb)
{
	return skb_headlen(skb) + __skb_pagelen(skb);
}

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to @size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	/*
	 * Propagate page pfmemalloc to the skb if we can. The problem is
	 * that not all callers have unique ownership of the page but rely
	 * on page_is_pfmemalloc doing the right thing(tm).
	 */
	frag->bv_page		  = page;
	frag->bv_offset		  = off;
	skb_frag_size_set(frag, size);

	page = compound_head(page);
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc	= true;
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}
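
/* Editor's sketch: attaching caller-owned page memory as a new fragment.
 * The name and the truesize accounting below are illustrative only -
 * real callers normally use skb_add_rx_frag(), declared next, which
 * performs this accounting itself.
 */
static inline void example_attach_page(struct sk_buff *skb, struct page *page,
				       int off, int size)
{
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, off, size);
	skb->len	+= size;
	skb->data_len	+= size;
	skb->truesize	+= PAGE_SIZE;
}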

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize);

#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
void *skb_put(struct sk_buff *skb, unsigned int len);
static inline void *__skb_put(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = __skb_put(skb, len);

	memset(tmp, 0, len);
	return tmp;
}

static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
				   unsigned int len)
{
	void *tmp = __skb_put(skb, len);

	memcpy(tmp, data, len);
	return tmp;
}

static inline void __skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)__skb_put(skb, 1) = val;
}

static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memset(tmp, 0, len);

	return tmp;
}

static inline void *skb_put_data(struct sk_buff *skb, const void *data,
				 unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memcpy(tmp, data, len);

	return tmp;
}

static inline void skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)skb_put(skb, 1) = val;
}
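
/* Editor's sketch of a hypothetical type/length/value encoder built from
 * the helpers above; the skb is assumed to have 2 + len bytes of tailroom.
 */
static inline void example_put_tlv(struct sk_buff *skb, u8 type,
				   const void *val, u8 len)
{
	skb_put_u8(skb, type);		/* T */
	skb_put_u8(skb, len);		/* L */
	skb_put_data(skb, val, len);	/* V */
}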

void *skb_push(struct sk_buff *skb, unsigned int len);
static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

void *skb_pull(struct sk_buff *skb, unsigned int len);
static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

void *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return true;
	if (unlikely(len > skb->len))
		return false;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
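
/* Editor's sketch: the canonical guard before touching packet bytes.
 * The fixed 4-byte "header" is hypothetical; real callers pass their
 * protocol's header length.
 */
static inline int example_read_first_word(struct sk_buff *skb, u32 *word)
{
	if (!pskb_may_pull(skb, sizeof(*word)))
		return -EINVAL;		/* runt packet */

	*word = *(u32 *)skb->data;	/* linear area is now big enough */
	return 0;
}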

void skb_condense(struct sk_buff *skb);

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_availroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 *	allocated by sk_stream_alloc()
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return 0;

	return skb->end - skb->tail - skb->reserved_tailroom;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

/**
 *	skb_tailroom_reserve - adjust reserved_tailroom
 *	@skb: buffer to alter
 *	@mtu: maximum amount of headlen permitted
 *	@needed_tailroom: minimum amount of reserved_tailroom
 *
 *	Set reserved_tailroom so that headlen can be as large as possible but
 *	not larger than mtu and tailroom cannot be smaller than
 *	needed_tailroom.
 *	The required headroom should already have been reserved before using
 *	this function.
 */
static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
					unsigned int needed_tailroom)
{
	SKB_LINEAR_ASSERT(skb);
	if (mtu < skb_tailroom(skb) - needed_tailroom)
		/* use at most mtu */
		skb->reserved_tailroom = skb_tailroom(skb) - mtu;
	else
		/* use up to all available space */
		skb->reserved_tailroom = needed_tailroom;
}

#define ENCAP_TYPE_ETHER	0
#define ENCAP_TYPE_IPPROTO	1

static inline void skb_set_inner_protocol(struct sk_buff *skb,
					  __be16 protocol)
{
	skb->inner_protocol = protocol;
	skb->inner_protocol_type = ENCAP_TYPE_ETHER;
}

static inline void skb_set_inner_ipproto(struct sk_buff *skb,
					 __u8 ipproto)
{
	skb->inner_ipproto = ipproto;
	skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
}

static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->mac_header;
	skb->inner_network_header = skb->network_header;
	skb->inner_transport_header = skb->transport_header;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

static inline unsigned char *skb_inner_transport_header(const struct sk_buff
							*skb)
{
	return skb->head + skb->inner_transport_header;
}

static inline int skb_inner_transport_offset(const struct sk_buff *skb)
{
	return skb_inner_transport_header(skb) - skb->data;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
	skb->inner_transport_header = skb->data - skb->head;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
						   const int offset)
{
	skb_reset_inner_transport_header(skb);
	skb->inner_transport_header += offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
	skb->inner_network_header = skb->data - skb->head;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
						const int offset)
{
	skb_reset_inner_network_header(skb);
	skb->inner_network_header += offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->data - skb->head;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_inner_mac_header(skb);
	skb->inner_mac_header += offset;
}
static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
	return skb->transport_header != (typeof(skb->transport_header))~0U;
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_offset(const struct sk_buff *skb)
{
	return skb_mac_header(skb) - skb->data;
}

static inline u32 skb_mac_header_len(const struct sk_buff *skb)
{
	return skb->network_header - skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != (typeof(skb->mac_header))~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

static inline void skb_pop_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->network_header;
}

static inline void skb_probe_transport_header(struct sk_buff *skb)
{
	struct flow_keys_basic keys;

	if (skb_transport_header_was_set(skb))
		return;

	if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
					     NULL, 0, 0, 0, 0))
		skb_set_transport_header(skb, keys.control.thoff);
}

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
{
	return skb->head + skb->csum_start;
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
{
	return skb->inner_transport_header - skb->inner_network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int skb_inner_network_offset(const struct sk_buff *skb)
{
	return skb_inner_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom, you should not reduce this.
 *
 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
 * to reduce average number of cache lines per packet.
 * get_rps_cpus() for example only access one 64 bytes aligned block :
 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
{
	if (WARN_ON(skb_is_nonlinear(skb)))
		return;
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	__skb_set_length(skb, len);
}

void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
{
	unsigned int diff = len - skb->len;

	if (skb_tailroom(skb) < diff) {
		int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
					   GFP_ATOMIC);
		if (ret)
			return ret;
	}
	__skb_set_length(skb, len);
	return 0;
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor) {
		skb->destructor(skb);
		skb->destructor = NULL;
		skb->sk		= NULL;
	} else {
		BUG_ON(skb->sk);
	}
}

/**
 *	skb_orphan_frags - orphan the frags contained in a buffer
 *	@skb: buffer to orphan frags from
 *	@gfp_mask: allocation mask for replacement pages
 *
 *	For each frag in the SKB which needs a destructor (i.e. has an
 *	owner) create a copy of that frag and release the original
 *	page by calling the destructor.
 */
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!skb_zcopy(skb)))
		return 0;
	if (!skb_zcopy_is_nouarg(skb) &&
	    skb_uarg(skb)->callback == sock_zerocopy_callback)
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/* Frags must be orphaned, even if refcounted, if skb might loop to rx path */
static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!skb_zcopy(skb)))
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
void skb_queue_purge(struct sk_buff_head *list);

unsigned int skb_rbtree_purge(struct rb_root *root);

void *netdev_alloc_frag(unsigned int fragsz);

struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
				   gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}


static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
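
/* Editor's Rx-path sketch: the 1536-byte budget and the function name are
 * hypothetical; the point is that the helper above already inserts the
 * NET_IP_ALIGN padding so the IP header lands on a 4-byte boundary.
 */
static inline struct sk_buff *example_rx_alloc(struct net_device *dev)
{
	return netdev_alloc_skb_ip_align(dev, 1536);
}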

static inline void skb_free_frag(void *addr)
{
	page_frag_free(addr);
}

void *napi_alloc_frag(unsigned int fragsz);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
				 unsigned int length, gfp_t gfp_mask);
static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
					     unsigned int length)
{
	return __napi_alloc_skb(napi, length, GFP_ATOMIC);
}
void napi_consume_skb(struct sk_buff *skb, int budget);

void __kfree_skb_flush(void);
void __kfree_skb_defer(struct sk_buff *skb);

/**
 * __dev_alloc_pages - allocate page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 * @order: size of the allocation
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
*/
static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
					     unsigned int order)
{
	/* This piece of code contains several assumptions.
	 * 1.  This is for device Rx, therefore a cold page is preferred.
	 * 2.  The expectation is the user wants a compound page.
	 * 3.  If requesting an order 0 page it will not be compound
	 *     due to the check to see if order has a value in prep_new_page
	 * 4.  __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
	 *     code in gfp_to_alloc_flags that should be enforcing this.
	 */
	gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;

	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
}

static inline struct page *dev_alloc_pages(unsigned int order)
{
	return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
}

/**
 * __dev_alloc_page - allocate a page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
{
	return __dev_alloc_pages(gfp_mask, 0);
}

static inline struct page *dev_alloc_page(void)
{
	return dev_alloc_pages(0);
}

/**
 *	skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
 *	@page: The page that was allocated from skb_alloc_page
 *	@skb: The skb that may need pfmemalloc set
 */
static inline void skb_propagate_pfmemalloc(struct page *page,
					     struct sk_buff *skb)
{
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc = true;
}

/**
 * skb_frag_off() - Returns the offset of a skb fragment
 * @frag: the paged fragment
 */
static inline unsigned int skb_frag_off(const skb_frag_t *frag)
{
	return frag->bv_offset;
}

/**
 * skb_frag_off_add() - Increments the offset of a skb fragment by @delta
 * @frag: skb fragment
 * @delta: value to add
 */
static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
{
	frag->bv_offset += delta;
}

/**
 * skb_frag_off_set() - Sets the offset of a skb fragment
 * @frag: skb fragment
 * @offset: offset of fragment
 */
static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
{
	frag->bv_offset = offset;
}

/**
 * skb_frag_off_copy() - Sets the offset of a skb fragment from another fragment
 * @fragto: skb fragment where offset is set
 * @fragfrom: skb fragment offset is copied from
 */
static inline void skb_frag_off_copy(skb_frag_t *fragto,
				     const skb_frag_t *fragfrom)
{
	fragto->bv_offset = fragfrom->bv_offset;
}

/**
 * skb_frag_page - retrieve the page referred to by a paged fragment
 * @frag: the paged fragment
 *
 * Returns the &struct page associated with @frag.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->bv_page;
}

/**
 * __skb_frag_ref - take an additional reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Takes an additional reference on the paged fragment @frag.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

/**
 * skb_frag_ref - take an additional reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset.
 *
 * Takes an additional reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

/**
 * __skb_frag_unref - release a reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Releases a reference on the paged fragment @frag.
 */
static inline void __skb_frag_unref(skb_frag_t *frag)
{
	put_page(skb_frag_page(frag));
}

/**
 * skb_frag_unref - release a reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset
 *
 * Releases a reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}

/**
 * skb_frag_address - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. The page must already
 * be mapped.
 */
static inline void *skb_frag_address(const skb_frag_t *frag)
{
	return page_address(skb_frag_page(frag)) + skb_frag_off(frag);
}

/**
 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. Checks that the page
 * is mapped and returns %NULL otherwise.
 */
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));
	if (unlikely(!ptr))
		return NULL;

	return ptr + skb_frag_off(frag);
}

/**
 * skb_frag_page_copy() - sets the page in a fragment from another fragment
 * @fragto: skb fragment where page is set
 * @fragfrom: skb fragment page is copied from
 */
static inline void skb_frag_page_copy(skb_frag_t *fragto,
				      const skb_frag_t *fragfrom)
{
	fragto->bv_page = fragfrom->bv_page;
}

/**
 * __skb_frag_set_page - sets the page contained in a paged fragment
 * @frag: the paged fragment
 * @page: the page to set
 *
 * Sets the fragment @frag to contain @page.
 */
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->bv_page = page;
}

/**
 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
 * @skb: the buffer
 * @f: the fragment offset
 * @page: the page to set
 *
 * Sets the @f'th fragment of @skb to contain @page.
 */
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);

/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 * @dir: the direction of the mapping (``PCI_DMA_*``)
 *
 * Maps the page associated with @frag to @dev.
 */
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    skb_frag_off(frag) + offset, size, dir);
}
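
/* Editor's sketch of the common Tx use: map each paged fragment for DMA.
 * Descriptor programming and unwinding of earlier mappings on error are
 * omitted; the function name is hypothetical.
 */
static inline int example_map_frags(struct device *dev, struct sk_buff *skb,
				    enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t addr = skb_frag_dma_map(dev, frag, 0,
						   skb_frag_size(frag), dir);

		if (dma_mapping_error(dev, addr))
			return -ENOMEM;
		/* ... program 'addr' into the hardware descriptor ... */
	}
	return 0;
}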

static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
					gfp_t gfp_mask)
{
	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}


static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
						  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
}


/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int skb_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
	       pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
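
/* Editor's sketch: make the header writable, then prepend a hypothetical
 * 4-byte tag. skb_cow_head() is the right call here because only the
 * header is modified, per the comment above.
 */
static inline int example_push_tag(struct sk_buff *skb, __be32 tag)
{
	int err = skb_cow_head(skb, sizeof(tag));

	if (err)
		return err;

	memcpy(skb_push(skb, sizeof(tag)), &tag, sizeof(tag));
	return 0;
}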

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

/**
 *	__skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *	@free_on_error: free buffer on error
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error if @free_on_error is true.
 */
static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
				  bool free_on_error)
{
	unsigned int size = skb->len;

	if (unlikely(size < len)) {
		len -= size;
		if (__skb_pad(skb, len, free_on_error))
			return -ENOMEM;
		__skb_put(skb, len);
	}
	return 0;
}

/**
 *	skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
{
	return __skb_put_padto(skb, len, true);
}
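
/* Editor's sketch: Tx paths commonly pad runt frames to the 60-byte
 * minimum Ethernet length before handing them to hardware that does not
 * pad on its own. The constant stands in for ETH_ZLEN to stay
 * self-contained.
 */
static inline int example_pad_eth_frame(struct sk_buff *skb)
{
	return skb_put_padto(skb, 60);	/* frees the skb on failure */
}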

static inline int skb_add_data(struct sk_buff *skb,
			       struct iov_iter *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		__wsum csum = 0;
		if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
					         &csum, from)) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
				    const struct page *page, int off)
{
	if (skb_zcopy(skb))
		return false;
	if (i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == skb_frag_off(frag) + skb_frag_size(frag);
	}
	return false;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

3263 3264 3265 3266 3267 3268 3269 3270 3271
/**
 * skb_has_shared_frag - can any frag be overwritten
 * @skb: buffer to test
 *
 * Return true if the skb has at least one frag that might be modified
 * by an external entity (as in vmsplice()/sendfile())
 */
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) &&
	       skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

static __always_inline void
__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
		     unsigned int off)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_block_sub(skb->csum,
					   csum_partial(start, len, 0), off);
	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) < 0)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	__skb_postpull_rcsum(skb, start, len, 0);
}

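/* Usage sketch (illustrative only): when pulling a 4-byte shim header
 * off a received packet, pass the old data pointer and the pulled
 * length so that a CHECKSUM_COMPLETE value stays consistent.  The
 * skb_pull_rcsum() helper declared below combines both steps:
 *
 *	const void *old_data = skb->data;
 *
 *	__skb_pull(skb, 4);
 *	skb_postpull_rcsum(skb, old_data, 4);
 */
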
static __always_inline void
__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
		     unsigned int off)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_block_add(skb->csum,
					   csum_partial(start, len, 0), off);
}

/**
 *	skb_postpush_rcsum - update checksum for received skb after push
 *	@skb: buffer to update
 *	@start: start of data after push
 *	@len: length of data pushed
 *
 *	After doing a push on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum.
 */
static inline void skb_postpush_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	__skb_postpush_rcsum(skb, start, len, 0);
}

void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	skb_push_rcsum - push skb and update receive checksum
 *	@skb: buffer to update
 *	@len: length of data pushed
 *
 *	This function performs an skb_push on the packet and updates
 *	the CHECKSUM_COMPLETE checksum.  It should be used on
 *	receive path processing instead of skb_push unless you know
 *	that the checksum difference is zero (e.g., a valid IP header)
 *	or you are setting ip_summed to CHECKSUM_NONE.
 */
static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
{
	skb_push(skb, len);
	skb_postpush_rcsum(skb, skb->data, len);
	return skb->data;
}

int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksums of received packets are still valid after the operation.
 *	It can change skb pointers.
 */

static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	return pskb_trim_rcsum_slow(skb, len);
}

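/* Usage sketch (illustrative only; "tot_len" is assumed to have been
 * read from the IP header): trim link-layer padding beyond the end of
 * the IP datagram while keeping the checksum state usable:
 *
 *	if (pskb_trim_rcsum(skb, tot_len))
 *		goto drop;
 */
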
static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	__skb_trim(skb, len);
	return 0;
}

static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __skb_grow(skb, len);
}

#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
#define skb_rb_first(root) rb_to_skb(rb_first(root))
#define skb_rb_last(root)  rb_to_skb(rb_last(root))
#define skb_rb_next(skb)   rb_to_skb(rb_next(&(skb)->rbnode))
#define skb_rb_prev(skb)   rb_to_skb(rb_prev(&(skb)->rbnode))

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     skb != (struct sk_buff *)(queue);				\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)					\
		for (skb = (queue)->next, tmp = skb->next;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

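/* Usage sketch (illustrative only; "match" is a hypothetical predicate
 * and the queue lock is assumed to be held): the _safe variants allow
 * unlinking the current skb while walking:
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) {
 *		if (match(skb)) {
 *			__skb_unlink(skb, &sk->sk_receive_queue);
 *			kfree_skb(skb);
 *		}
 *	}
 */
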
#define skb_queue_walk_from(queue, skb)						\
		for (; skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_rbtree_walk(skb, root)						\
		for (skb = skb_rb_first(root); skb != NULL;			\
		     skb = skb_rb_next(skb))

#define skb_rbtree_walk_from(skb)						\
		for (; skb != NULL;						\
		     skb = skb_rb_next(skb))

#define skb_rbtree_walk_from_safe(skb, tmp)					\
		for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL);	\
		     skb = tmp)

#define skb_queue_walk_from_safe(queue, skb, tmp)				\
		for (tmp = skb->next;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     skb != (struct sk_buff *)(queue);				\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->prev, tmp = skb->prev;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->prev;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->prev)

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

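/* Usage sketch (illustrative only): count the buffers chained through
 * the frag_list, including the head skb itself:
 *
 *	struct sk_buff *iter;
 *	unsigned int segs = 1;
 *
 *	skb_walk_frags(skb, iter)
 *		segs++;
 */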

int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
				const struct sk_buff *skb);
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
					  struct sk_buff_head *queue,
					  unsigned int flags,
					  void (*destructor)(struct sock *sk,
							   struct sk_buff *skb),
					  int *off, int *err,
					  struct sk_buff **last);
struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
					void (*destructor)(struct sock *sk,
							   struct sk_buff *skb),
					int *off, int *err,
					struct sk_buff **last);
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
				    void (*destructor)(struct sock *sk,
						       struct sk_buff *skb),
				    int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
				  int *err);
__poll_t datagram_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
			   struct iov_iter *to, int size);
static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
					struct msghdr *msg, int size)
{
	return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
}
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
				   struct msghdr *msg);
int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
			   struct iov_iter *to, int len,
			   struct ahash_request *hash);
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				 struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
static inline void skb_free_datagram_locked(struct sock *sk,
					    struct sk_buff *skb)
{
	__skb_free_datagram_locked(sk, skb, 0);
}
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
			      int len, __wsum csum);
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int len,
		    unsigned int flags);
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
			 int len);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
		 int len, int hlen);
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
		  int mac_len);
int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len);
int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
int skb_mpls_dec_ttl(struct sk_buff *skb);
struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
			     gfp_t gfp);

static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
	return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
}

static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
{
	return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}

struct skb_checksum_ops {
	__wsum (*update)(const void *mem, int len, __wsum wsum);
	__wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
};

extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;

__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
		      __wsum csum, const struct skb_checksum_ops *ops);
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
		    __wsum csum);

static inline void * __must_check
__skb_header_pointer(const struct sk_buff *skb, int offset,
		     int len, void *data, int hlen, void *buffer)
{
	if (hlen - offset >= len)
		return data + offset;

	if (!skb ||
	    skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}

static inline void * __must_check
skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
{
	return __skb_header_pointer(skb, offset, len, skb->data,
				    skb_headlen(skb), buffer);
}

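/* Usage sketch (illustrative only; "thoff" is assumed to be the offset
 * of the transport header): copy out a TCP header that may be split
 * across fragments, falling back to an on-stack buffer:
 *
 *	struct tcphdr _tcph;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, thoff, sizeof(_tcph), &_tcph);
 *	if (!th)
 *		goto drop;
 */
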
/**
 *	skb_needs_linearize - check if we need to linearize a given skb
 *			      depending on the given device features.
 *	@skb: socket buffer to check
 *	@features: net device features
 *
 *	Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
static inline bool skb_needs_linearize(struct sk_buff *skb,
				       netdev_features_t features)
{
	return skb_is_nonlinear(skb) &&
	       ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
}

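/* Usage sketch (illustrative only, modeled on the transmit path):
 * linearize only when the device cannot handle the current layout:
 *
 *	if (skb_needs_linearize(skb, dev->features) &&
 *	    __skb_linearize(skb))
 *		goto drop;
 */
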
static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct __kernel_old_timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct __kernel_old_timeval
 *	and stores it in @stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct __kernel_old_timeval *stamp)
{
	*stamp = ns_to_kernel_old_timeval(skb->tstamp);
}

static inline void skb_get_new_timestamp(const struct sk_buff *skb,
					 struct __kernel_sock_timeval *stamp)
{
	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);

	stamp->tv_sec = ts.tv_sec;
	stamp->tv_usec = ts.tv_nsec / 1000;
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void skb_get_new_timestampns(const struct sk_buff *skb,
					   struct __kernel_timespec *stamp)
{
	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);

	stamp->tv_sec = ts.tv_sec;
	stamp->tv_nsec = ts.tv_nsec;
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return 0;
}

static inline u8 skb_metadata_len(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->meta_len;
}

static inline void *skb_metadata_end(const struct sk_buff *skb)
{
	return skb_mac_header(skb);
}

static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
					  const struct sk_buff *skb_b,
					  u8 meta_len)
{
	const void *a = skb_metadata_end(skb_a);
	const void *b = skb_metadata_end(skb_b);
	/* Using a more efficient variant than a plain call to memcmp(). */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	u64 diffs = 0;

	switch (meta_len) {
#define __it(x, op) (x -= sizeof(u##op))
#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
	case 32: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case 24: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case 16: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case  8: diffs |= __it_diff(a, b, 64);
		break;
	case 28: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case 20: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case 12: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case  4: diffs |= __it_diff(a, b, 32);
		break;
	}
	return diffs;
#else
	return memcmp(a - meta_len, b - meta_len, meta_len);
#endif
}

static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
					const struct sk_buff *skb_b)
{
	u8 len_a = skb_metadata_len(skb_a);
	u8 len_b = skb_metadata_len(skb_b);

	if (!(len_a | len_b))
		return false;

	return len_a != len_b ?
	       true : __skb_metadata_differs(skb_a, skb_b, len_a);
}

static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len)
{
	skb_shinfo(skb)->meta_len = meta_len;
}

static inline void skb_metadata_clear(struct sk_buff *skb)
{
	skb_metadata_set(skb, 0);
}

struct sk_buff *skb_clone_sk(struct sk_buff *skb);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

void skb_clone_tx_timestamp(struct sk_buff *skb);
bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb to the stack with a
 * timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 *
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

void __skb_tstamp_tx(struct sk_buff *orig_skb,
		     struct skb_shared_hwtstamps *hwtstamps,
		     struct sock *sk, int tstype);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * Specifically, one should make absolutely sure that this function is
 * called before TX completion of this packet can trigger.  Otherwise
 * the packet could potentially already be freed.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
		skb_tstamp_tx(skb, NULL);
}

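/* Usage sketch (illustrative only; the hardware hand-off helper is
 * hypothetical): timestamp right before the descriptor is posted:
 *
 *	skb_tx_timestamp(skb);
 *	my_hw_post_descriptor(ring, skb);
 */
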
/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 *
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
__sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
		skb->csum_valid ||
		(skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) >= 0));
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify that checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is correct.  In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}

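/* Usage sketch (illustrative only, modeled on receive paths such as
 * ICMP, where the checksum covers the whole packet and there is no
 * pseudo header):
 *
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;
 */
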
static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level == 0)
			skb->ip_summed = CHECKSUM_NONE;
		else
			skb->csum_level--;
	}
}

static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
			skb->csum_level++;
	} else if (skb->ip_summed == CHECKSUM_NONE) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = 0;
	}
}

/* Check if we need to perform checksum complete validation.
 *
 * Returns true if checksum complete is needed, false otherwise
 * (either checksum is unnecessary or zero checksum is allowed).
 */
static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
						  bool zero_okay,
						  __sum16 check)
{
	if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
		skb->csum_valid = 1;
		__skb_decr_checksum_unnecessary(skb);
		return false;
	}

	return true;
}

/* For small packets <= CHECKSUM_BREAK perform checksum complete directly
 * in checksum_init.
 */
#define CHECKSUM_BREAK 76

/* Unset checksum-complete
 *
 * Unsetting checksum complete can be done when a packet is being
 * modified (decompressed for instance) and the checksum-complete
 * value is invalidated.
 */
static inline void skb_checksum_complete_unset(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/* Validate (init) checksum based on checksum complete.
 *
 * Return values:
 *   0: checksum is validated, or validation is deferred to
 *	skb_checksum_complete. In the latter case ip_summed will not be
 *	CHECKSUM_UNNECESSARY and the pseudo checksum is stored in skb->csum
 *	for use in __skb_checksum_complete
 *   non-zero: value of invalid checksum
 *
 */
static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
						       bool complete,
						       __wsum psum)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_fold(csum_add(psum, skb->csum))) {
			skb->csum_valid = 1;
			return 0;
		}
	}

	skb->csum = psum;

	if (complete || skb->len <= CHECKSUM_BREAK) {
		__sum16 csum;

		csum = __skb_checksum_complete(skb);
		skb->csum_valid = !csum;
		return csum;
	}

	return 0;
}

static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
{
	return 0;
}

/* Perform checksum validate (init). Note that this is a macro since we only
 * want to call compute_pseudo, the pseudo-header function passed in, when
 * it is actually necessary.
 * First we try to validate without any computation (checksum unnecessary) and
 * then calculate based on checksum complete calling the function to compute
 * pseudo header.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
#define __skb_checksum_validate(skb, proto, complete,			\
				zero_okay, check, compute_pseudo)	\
({									\
	__sum16 __ret = 0;						\
	skb->csum_valid = 0;						\
	if (__skb_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_checksum_validate_complete(skb,		\
				complete, compute_pseudo(skb, proto));	\
	__ret;								\
})

#define skb_checksum_init(skb, proto, compute_pseudo)			\
	__skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)

#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo)	\
	__skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)

#define skb_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)

#define skb_checksum_validate_zero_check(skb, proto, check,		\
					 compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)

#define skb_checksum_simple_validate(skb)				\
	__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)

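/* Usage sketch (illustrative only, modeled on the IPv4 TCP receive
 * path; inet_compute_pseudo() is declared in net/ip.h): fold in the
 * pseudo header and validate, deferring to __skb_checksum_complete
 * for long packets:
 *
 *	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
 *		goto csum_error;
 */
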
static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
{
	return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
}

static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo)
{
	skb->csum = ~pseudo;
	skb->ip_summed = CHECKSUM_COMPLETE;
}

#define skb_checksum_try_convert(skb, proto, compute_pseudo)	\
do {									\
	if (__skb_checksum_convert_check(skb))				\
		__skb_checksum_convert(skb, compute_pseudo(skb, proto)); \
} while (0)

static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
					      u16 start, u16 offset)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
	skb->csum_offset = offset - start;
}

/* Update the skb and packet to reflect the remote checksum offload operation.
 * When called, ptr indicates the starting point for skb->csum when
 * ip_summed is CHECKSUM_COMPLETE. If we need to create checksum complete
 * here, skb_postpull_rcsum is done so that skb->csum starts at ptr.
 */
static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
				       int start, int offset, bool nopartial)
{
	__wsum delta;

	if (!nopartial) {
		skb_remcsum_adjust_partial(skb, ptr, start, offset);
		return;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
		__skb_checksum_complete(skb);
		skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
	}

	delta = remcsum_adjust(ptr, skb->csum, start, offset);

	/* Adjust skb->csum since we changed the packet */
	skb->csum = csum_add(skb->csum, delta);
}

static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return (void *)(skb->_nfct & NFCT_PTRMASK);
#else
	return NULL;
#endif
}

static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return skb->_nfct;
#else
	return 0UL;
#endif
}

static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	skb->_nfct = nfct;
#endif
}

#ifdef CONFIG_SKB_EXTENSIONS
enum skb_ext_id {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	SKB_EXT_BRIDGE_NF,
#endif
#ifdef CONFIG_XFRM
	SKB_EXT_SEC_PATH,
#endif
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	TC_SKB_EXT,
#endif
	SKB_EXT_NUM, /* must be last */
};

/**
 *	struct skb_ext - sk_buff extensions
 *	@refcnt: 1 on allocation, deallocated on 0
 *	@offset: offset to add to @data to obtain extension address
 *	@chunks: size currently allocated, stored in SKB_EXT_ALIGN_SHIFT units
 *	@data: start of extension data, variable sized
 *
 *	Note: offsets/lengths are stored in chunks of 8 bytes, which allows
 *	'u8' types to be used while supporting up to 2kb of extension data.
 */
struct skb_ext {
	refcount_t refcnt;
	u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */
	u8 chunks;		/* same */
	char data[0] __aligned(8);
};

void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_put(struct skb_ext *ext);

static inline void skb_ext_put(struct sk_buff *skb)
{
	if (skb->active_extensions)
		__skb_ext_put(skb->extensions);
}

static inline void __skb_ext_copy(struct sk_buff *dst,
				  const struct sk_buff *src)
{
	dst->active_extensions = src->active_extensions;

	if (src->active_extensions) {
		struct skb_ext *ext = src->extensions;

		refcount_inc(&ext->refcnt);
		dst->extensions = ext;
	}
}

static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src)
{
	skb_ext_put(dst);
	__skb_ext_copy(dst, src);
}

static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i)
{
	return !!ext->offset[i];
}

static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
{
	return skb->active_extensions & (1 << id);
}

static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
{
	if (skb_ext_exist(skb, id))
		__skb_ext_del(skb, id);
}

static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
{
	if (skb_ext_exist(skb, id)) {
		struct skb_ext *ext = skb->extensions;

		return (void *)ext + (ext->offset[id] << 3);
	}

	return NULL;
}

static inline void skb_ext_reset(struct sk_buff *skb)
{
	if (unlikely(skb->active_extensions)) {
		__skb_ext_put(skb->extensions);
		skb->active_extensions = 0;
	}
}
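
/* Usage sketch (illustrative only; TC_SKB_EXT is only available with
 * CONFIG_NET_TC_SKB_EXT=y): attach an extension, then look it up again
 * later:
 *
 *	void *ext = skb_ext_add(skb, TC_SKB_EXT);
 *
 *	if (!ext)
 *		return -ENOMEM;
 *	...
 *	ext = skb_ext_find(skb, TC_SKB_EXT);
 */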
#else
static inline void skb_ext_put(struct sk_buff *skb) {}
static inline void skb_ext_reset(struct sk_buff *skb) {}
static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
#endif /* CONFIG_SKB_EXTENSIONS */

static inline void nf_reset_ct(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb_nfct(skb));
	skb->_nfct = 0;
#endif
}

static inline void nf_reset_trace(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	skb->nf_trace = 0;
#endif
}

static inline void ipvs_reset(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IP_VS)
	skb->ipvs_property = 0;
#endif
}

/* Note: This doesn't put any conntrack info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
			     bool copy)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->_nfct = src->_nfct;
	nf_conntrack_get(skb_nfct(src));
#endif
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	if (copy)
		dst->nf_trace = src->nf_trace;
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb_nfct(dst));
#endif
	__nf_copy(dst, src, true);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline int secpath_exists(const struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
#else
	return 0;
#endif
}

static inline bool skb_irq_freeable(const struct sk_buff *skb)
{
	return !skb->destructor &&
		!secpath_exists(skb) &&
		!skb_nfct(skb) &&
		!skb->_skb_refdst &&
		!skb_has_frag_list(skb);
}

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}

static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
{
	skb->dst_pending_confirm = val;
}

static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
{
	return skb->dst_pending_confirm != 0;
}

static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb_ext_find(skb, SKB_EXT_SEC_PATH);
#else
	return NULL;
#endif
}

/* Keeps track of mac header offset relative to skb->head.
 * It is useful for TSO of tunneling protocols, e.g. GRE.
 * For a non-tunnel skb it points to skb_mac_header() and for a
 * tunnel skb it points to the outer mac header.
 * Keeps track of level of encapsulation of network headers.
 */
struct skb_gso_cb {
	union {
		int	mac_offset;
		int	data_offset;
	};
	int	encap_level;
	__wsum	csum;
	__u16	csum_start;
};
#define SKB_SGO_CB_OFFSET	32
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))

static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
	return (skb_mac_header(inner_skb) - inner_skb->head) -
		SKB_GSO_CB(inner_skb)->mac_offset;
}

static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
{
	int new_headroom, headroom;
	int ret;

	headroom = skb_headroom(skb);
	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
	if (ret)
		return ret;

	new_headroom = skb_headroom(skb);
	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
	return 0;
}

static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
{
	/* Do not update partial checksums if remote checksum is enabled. */
	if (skb->remcsum_offload)
		return;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
}

/* Compute the checksum for a gso segment. First compute the checksum value
 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
 * then add in skb->csum (checksum from csum_start to end of packet).
 * skb->csum and csum_start are then updated to reflect the checksum of the
 * resultant packet starting from the transport header-- the resultant checksum
 * is in the res argument (i.e. normally zero or ~ of checksum of a pseudo
 * header).
 */
static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
	unsigned char *csum_start = skb_transport_header(skb);
	int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
	__wsum partial = SKB_GSO_CB(skb)->csum;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;

	return csum_fold(csum_partial(csum_start, plen, partial));
}

static inline bool skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
}

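/* Usage sketch (illustrative only, modeled on ndo_features_check()
 * handlers): a device that can only segment TCP may mask out the GSO
 * feature bits for anything else:
 *
 *	if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
 *		features &= ~NETIF_F_GSO_MASK;
 */
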
static inline void skb_gso_reset(struct sk_buff *skb)
{
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_segs = 0;
	skb_shinfo(skb)->gso_type = 0;
}

static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
					 u16 increment)
{
	if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
		return;
	shinfo->gso_size += increment;
}

static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
					 u16 decrement)
{
	if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
		return;
	shinfo->gso_size -= decrement;
}

void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16(*skb_chkf)(struct sk_buff *skb));

/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs built around a head frag can be removed if they are
 * not cloned.  This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
	return !skb->head_frag || skb_cloned(skb);
}

/* Local Checksum Offload.
 * Compute outer checksum based on the assumption that the
 * inner checksum will be offloaded later.
 * See Documentation/networking/checksum-offloads.rst for
 * explanation of how this works.
 * Fill in outer checksum adjustment (e.g. with sum of outer
 * pseudo-header) before calling.
 * Also ensure that inner checksum is in linear data area.
 */
static inline __wsum lco_csum(struct sk_buff *skb)
{
	unsigned char *csum_start = skb_checksum_start(skb);
	unsigned char *l4_hdr = skb_transport_header(skb);
	__wsum partial;

	/* Start with complement of inner checksum adjustment */
	partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
						    skb->csum_offset));

	/* Add in checksum of our headers (incl. outer checksum
	 * adjustment filled in by caller) and return result.
	 */
	return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
}

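/* Usage sketch (illustrative only, modeled on the UDP tunnel transmit
 * path; "uh", "saddr", "daddr" and "len" are assumed to describe the
 * outer header): seed the checksum field with the complemented pseudo
 * header sum, then fold in lco_csum():
 *
 *	uh->check = ~csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, 0);
 *	uh->check = csum_fold(lco_csum(skb));
 *	if (uh->check == 0)
 *		uh->check = CSUM_MANGLED_0;
 */
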
#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */