/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/socket.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <net/flow_dissector.h>
#include <linux/splice.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
#include <net/flow.h>

/* The interface for checksum offload between the stack and networking drivers
 * is as follows...
 *
 * A. IP checksum related features
 *
 * Drivers advertise checksum offload capabilities in the features of a device.
 * From the stack's point of view these are capabilities offered by the driver;
 * a driver typically only advertises features that it is capable of offloading
 * to its device.
 *
 * The checksum related features are:
 *
 *	NETIF_F_HW_CSUM	- The driver (or its device) is able to compute one
 *			  IP (one's complement) checksum for any combination
 *			  of protocols or protocol layering. The checksum is
 *			  computed and set in a packet per the CHECKSUM_PARTIAL
 *			  interface (see below).
 *
 *	NETIF_F_IP_CSUM - Driver (device) is only able to checksum plain
 *			  TCP or UDP packets over IPv4. These are specifically
 *			  unencapsulated packets of the form IPv4|TCP or
 *			  IPv4|UDP where the Protocol field in the IPv4 header
 *			  is TCP or UDP. The IPv4 header may contain IP options.
 *			  This feature cannot be set in features for a device
 *			  with NETIF_F_HW_CSUM also set. This feature is being
 *			  DEPRECATED (see below).
 *
 *	NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
 *			  TCP or UDP packets over IPv6. These are specifically
 *			  unencapsulated packets of the form IPv6|TCP or
 *			  IPv6|UDP where the Next Header field in the IPv6
 *			  header is either TCP or UDP. IPv6 extension headers
 *			  are not supported with this feature. This feature
 *			  cannot be set in features for a device with
 *			  NETIF_F_HW_CSUM also set. This feature is being
 *			  DEPRECATED (see below).
 *
 *	NETIF_F_RXCSUM - Driver (device) performs receive checksum offload.
 *			 This flag is only used to disable the RX checksum
 *			 feature for a device. The stack will accept receive
 *			 checksum indication in packets received on a device
 *			 regardless of whether NETIF_F_RXCSUM is set.
 *
 * B. Checksumming of received packets by device. Indication of checksum
 *    verification is set in skb->ip_summed. Possible values are:
 *
 * CHECKSUM_NONE:
 *
 *   Device did not checksum this packet e.g. due to lack of capabilities.
 *   The packet contains a full (though not verified) checksum in the packet
 *   itself but not in skb->csum. Thus, skb->csum is undefined in this case.
 *
 * CHECKSUM_UNNECESSARY:
 *
 *   The hardware you're dealing with doesn't calculate the full checksum
 *   (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
 *   for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
 *   if their checksums are okay. skb->csum is still undefined in this case
 *   though. A driver or device must never modify the checksum field in the
 *   packet even if checksum is verified.
 *
 *   CHECKSUM_UNNECESSARY is applicable to following protocols:
 *     TCP: IPv6 and IPv4.
 *     UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
 *       zero UDP checksum for either IPv4 or IPv6, the networking stack
 *       may perform further validation in this case.
 *     GRE: only if the checksum is present in the header.
 *     SCTP: indicates the CRC in SCTP header has been validated.
 *
 *   skb->csum_level indicates the number of consecutive checksums found in
 *   the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
 *   For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
 *   and a device is able to verify the checksums for UDP (possibly zero),
 *   GRE (checksum flag is set), and TCP-- skb->csum_level would be set to
 *   two. If the device were only able to verify the UDP checksum and not
 *   GRE, either because it doesn't support GRE checksum or because the GRE
 *   checksum is bad, skb->csum_level would be set to zero (TCP checksum is
 *   not considered in this case).
 *
 * CHECKSUM_COMPLETE:
 *
 *   This is the most generic way. The device supplied the checksum of the
 *   _whole_ packet as seen by netif_rx() and stored it in skb->csum. This
 *   means the hardware doesn't need to parse L3/L4 headers to implement this.
 *
 *   Note: Even if a device supports only some protocols but is able to produce
 *   skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
 *
 * CHECKSUM_PARTIAL:
 *
 *   A checksum is set up to be offloaded to a device as described in the
 *   output description for CHECKSUM_PARTIAL. This may occur on a packet
 *   received directly from another Linux OS, e.g., a virtualized Linux kernel
 *   on the same host, or it may be set in the input path in GRO or remote
 *   checksum offload. For the purposes of checksum verification, the checksum
 *   referred to by skb->csum_start + skb->csum_offset and any preceding
 *   checksums in the packet are considered verified. Any checksums in the
 *   packet that are after the checksum being offloaded are not considered to
 *   be verified.
 *
 * C. Checksumming on transmit for non-GSO. The stack requests checksum offload
 *    in the skb->ip_summed for a packet. Values are:
 *
 * CHECKSUM_PARTIAL:
 *
 *   The driver is required to checksum the packet as seen by hard_start_xmit()
 *   from skb->csum_start up to the end, and to record/write the checksum at
 *   offset skb->csum_start + skb->csum_offset. A driver may verify that the
 *   csum_start and csum_offset values are valid values given the length and
 *   offset of the packet, however they should not attempt to validate that the
 *   checksum refers to a legitimate transport layer checksum-- it is the
 *   purview of the stack to validate that csum_start and csum_offset are set
 *   correctly.
 *
 *   When the stack requests checksum offload for a packet, the driver MUST
 *   ensure that the checksum is set correctly. A driver can either offload the
 *   checksum calculation to the device, or call skb_checksum_help (in the case
 *   that the device does not support offload for a particular checksum).
 *
 *   NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of
 *   NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate
 *   checksum offload capability. If a device has limited checksum capabilities
 *   (for instance can only perform NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM as
 *   described above) a helper function can be called to resolve
 *   CHECKSUM_PARTIAL. The helper functions are skb_csum_off_chk*. The helper
 *   function takes a spec argument that describes the protocol layer that is
 *   supported for checksum offload and can be called for each packet. If a
 *   packet does not match the specification for offload, skb_checksum_help
 *   is called to resolve the checksum.
 *
 * CHECKSUM_NONE:
 *
 *   The skb was already checksummed by the protocol, or a checksum is not
 *   required.
 *
 * CHECKSUM_UNNECESSARY:
 *
 *   This has the same meaning as CHECKSUM_NONE for checksum offload on
 *   output.
 *
 * CHECKSUM_COMPLETE:
 *   Not used in checksum output. If a driver observes a packet with this value
 *   set in the skbuff, it should treat the packet as if CHECKSUM_NONE were set.
 *
 * D. Non-IP checksum (CRC) offloads
 *
 *   NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
 *     offloading the SCTP CRC in a packet. To perform this offload the stack
 *     will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
 *     accordingly. Note that there is no indication in the skbuff that the
 *     CHECKSUM_PARTIAL refers to an SCTP checksum, so a driver that supports
 *     both IP checksum offload and SCTP CRC offload must verify which offload
 *     is configured for a packet, presumably by inspecting packet headers.
 *
 *   NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
 *     offloading the FCOE CRC in a packet. To perform this offload the stack
 *     will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
 *     accordingly. Note that there is no indication in the skbuff that the
 *     CHECKSUM_PARTIAL refers to an FCOE checksum, so a driver that supports
 *     both IP checksum offload and FCOE CRC offload must verify which offload
 *     is configured for a packet, presumably by inspecting packet headers.
 *
 * E. Checksumming on output with GSO.
 *
 * In the case of a GSO packet (skb_is_gso(skb) is true), checksum offload
 * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the
 * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as
 * part of the GSO operation is implied. If a checksum is being offloaded
 * with GSO then ip_summed is CHECKSUM_PARTIAL, and csum_start and csum_offset
 * are set to refer to the outermost checksum being offloaded (two offloaded
 * checksums are possible with UDP encapsulation).
 */
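
/* Example (illustrative sketch only; the function and label names below are
 * made up, not part of this interface): a driver that cannot offload a given
 * checksum can resolve a CHECKSUM_PARTIAL request in its xmit path with
 * skb_checksum_help(), which computes the checksum in software and downgrades
 * the skb to CHECKSUM_NONE:
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *		    skb_checksum_help(skb))
 *			goto drop;		// software fallback failed
 *		// ... DMA-map and queue the frame to the device ...
 *		return NETDEV_TX_OK;
 *	drop:
 *		dev_kfree_skb_any(skb);
 *		return NETDEV_TX_OK;	// skb consumed either way
 *	}
 */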

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

/* Maximum value in skb->csum_level */
#define SKB_MAX_CSUM_LEVEL	3

#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
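
/* Worked example (illustrative; exact struct sizes vary by config): with
 * 64-byte cache lines, SKB_DATA_ALIGN rounds sizeof(struct sk_buff) and
 * sizeof(struct skb_shared_info) up to multiples of 64, so SKB_TRUESIZE(1500)
 * charges a socket for the 1500 data bytes plus both aligned metadata blocks,
 * noticeably more than the payload alone.
 */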

struct net_device;
struct scatterlist;
struct pipe_inode_info;
struct iov_iter;
struct napi_struct;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
	atomic_t		use;
	enum {
		BRNF_PROTO_UNCHANGED,
		BRNF_PROTO_8021Q,
		BRNF_PROTO_PPPOE
	} orig_proto:8;
	u8			pkt_otherhost:1;
	u8			in_prerouting:1;
	u8			bridged_dnat:1;
	__u16			frag_max_size;
	struct net_device	*physindev;

	/* always valid & non-NULL from FORWARD on, for physdev match */
	struct net_device	*physoutdev;
	union {
		/* prerouting: detect dnat in orig/reply direction */
		__be32          ipv4_daddr;
		struct in6_addr ipv6_daddr;

		/* after prerouting + nat detected: store original source
		 * mac since neigh resolution overwrites it, only used while
		 * skb is out in neigh layer.
		 */
		char neigh_header[8];
	};
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
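/* E.g. with 4 KiB pages, 65536/4096 + 1 = 17 > 16, so MAX_SKB_FRAGS is 17;
 * with 64 KiB pages the formula would give only 2, so the GRO floor of 16
 * applies instead.
 */
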
extern int sysctl_max_skb_frags;

/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
 * segment using its current segmentation instead.
 */
#define GSO_BY_FRAGS	0xFFFF

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct {
		struct page *p;
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp when queueing packet to NIC */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 3,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 4,

	/* This indicates at least one fragment might be overwritten
	 * (as in vmsplice(), sendfile() ...)
	 * If we need to compute a TX checksum, we'll need to copy
	 * all frags to avoid possible bad checksum
	 */
	SKBTX_SHARED_FRAG = 1 << 5,

	/* generate software time stamp when entering packet scheduling */
	SKBTX_SCHED_TSTAMP = 1 << 6,
};

#define SKBTX_ANY_SW_TSTAMP	(SKBTX_SW_TSTAMP    | \
				 SKBTX_SCHED_TSTAMP)
#define SKBTX_ANY_TSTAMP	(SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)

/*
 * The callback notifies userspace to release buffers when skb DMA is done in
 * the lower device; the skb's last reference should be 0 when calling this.
 * The zerocopy_success argument is true if zero copy transmit occurred,
 * false on data copy or out of memory error caused by data copy attempt.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
	void *ctx;
	unsigned long desc;
};
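
/* Example (sketch; the function name is made up): a completion callback
 * matching the signature above.  For SKBTX_DEV_ZEROCOPY skbs the core finds
 * this structure via skb_shinfo(skb)->destructor_arg:
 *
 *	static void foo_zerocopy_done(struct ubuf_info *uarg,
 *				      bool zerocopy_success)
 *	{
 *		// release the userspace buffer tracked by uarg->desc;
 *		// if !zerocopy_success, the data was copied instead.
 *	}
 */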

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	unsigned short	_unused;
	unsigned char	nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	unsigned int	gso_type;
	u32		tskey;
	__be32          ip6_frag_id;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void *		destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
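
/* Worked example: dataref == 0x00030002 means three users hold payload-only
 * references (dataref >> SKB_DATAREF_SHIFT) and two hold references to the
 * entire skb->data (dataref & SKB_DATAREF_MASK); skb_header_cloned() below
 * applies exactly this decomposition.
 */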


enum {
	SKB_FCLONE_UNAVAILABLE,	/* skb has no fclone (from head_cache) */
	SKB_FCLONE_ORIG,	/* orig skb (from fclone_cache) */
	SKB_FCLONE_CLONE,	/* companion fclone skb (from fclone_cache) */
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCP_FIXEDID = 1 << 4,

	SKB_GSO_TCPV6 = 1 << 5,

	SKB_GSO_FCOE = 1 << 6,

	SKB_GSO_GRE = 1 << 7,

	SKB_GSO_GRE_CSUM = 1 << 8,

	SKB_GSO_IPXIP4 = 1 << 9,

	SKB_GSO_IPXIP6 = 1 << 10,

	SKB_GSO_UDP_TUNNEL = 1 << 11,

	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 12,

	SKB_GSO_PARTIAL = 1 << 13,

	SKB_GSO_TUNNEL_REMCSUM = 1 << 14,

	SKB_GSO_SCTP = 1 << 15,

	SKB_GSO_ESP = 1 << 16,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

/**
 * struct skb_mstamp - multi resolution time stamps
 * @stamp_us: timestamp in us resolution
 * @stamp_jiffies: timestamp in jiffies
 */
struct skb_mstamp {
	union {
		u64		v64;
		struct {
			u32	stamp_us;
			u32	stamp_jiffies;
		};
	};
};

/**
 * skb_mstamp_get - get current timestamp
 * @cl: place to store timestamps
 */
static inline void skb_mstamp_get(struct skb_mstamp *cl)
{
	u64 val = local_clock();

	do_div(val, NSEC_PER_USEC);
	cl->stamp_us = (u32)val;
	cl->stamp_jiffies = (u32)jiffies;
}

/**
 * skb_mstamp_us_delta - compute the difference in usec between two skb_mstamp
 * @t1: pointer to newest sample
 * @t0: pointer to oldest sample
 */
static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
				      const struct skb_mstamp *t0)
{
	s32 delta_us = t1->stamp_us - t0->stamp_us;
	u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies;

	/* If delta_us is negative, this might be because interval is too big,
	 * or local_clock() drift is too big : fallback using jiffies.
	 */
	if (delta_us <= 0 ||
	    delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ)))

		delta_us = jiffies_to_usecs(delta_jiffies);

	return delta_us;
}

static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
				    const struct skb_mstamp *t0)
{
	s32 diff = t1->stamp_jiffies - t0->stamp_jiffies;

	if (!diff)
		diff = t1->stamp_us - t0->stamp_us;
	return diff > 0;
}
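
/* Usage sketch (the local variables are hypothetical): measuring an interval
 * in microseconds with the multi-resolution stamps above:
 *
 *	struct skb_mstamp t0, t1;
 *	u32 interval_us;
 *
 *	skb_mstamp_get(&t0);
 *	// ... some time later ...
 *	skb_mstamp_get(&t1);
 *	interval_us = skb_mstamp_us_delta(&t1, &t0);
 */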

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@tstamp: Time we arrived/left
 *	@rbnode: RB tree node, alternative to next/prev for netem/tcp
 *	@sk: Socket we are owned by
 *	@dev: Device we arrived on/are leaving by
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@priority: Packet queueing priority
 *	@ignore_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@ip_summed: Driver fed us an IP checksum
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ipvs_property: skbuff is owned by ipvs
 *	@tc_skip_classify: do not classify packet. set by IFB device
 *	@tc_at_ingress: used within tc_classify to distinguish in/egress
 *	@tc_redirected: packet was redirected by a tc action
 *	@tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@protocol: Packet protocol from driver
 *	@destructor: Destruct function
 *	@_nfct: Associated connection, if any (with nfctinfo bits)
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@skb_iif: ifindex of device we arrived on
 *	@tc_index: Traffic control index
 *	@hash: the packet hash
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@xmit_more: More SKBs are pending for this queue
 *	@ndisc_nodetype: router type (from link layer)
 *	@ooo_okay: allow the mapping of a socket to a queue to be changed
 *	@l4_hash: indicate hash is a canonical 4-tuple hash over transport
 *		ports.
 *	@sw_hash: indicates hash was computed in software stack
 *	@wifi_acked_valid: wifi_acked was set
 *	@wifi_acked: whether frame was acked on wifi or not
 *	@no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
 *	@dst_pending_confirm: need to confirm neighbour
 *	@napi_id: id of the NAPI struct this skb came from
 *	@secmark: security marking
 *	@mark: Generic packet mark
 *	@vlan_proto: vlan encapsulation protocol
 *	@vlan_tci: vlan tag control information
 *	@inner_protocol: Protocol (encapsulation)
 *	@inner_transport_header: Inner transport layer header (encapsulation)
 *	@inner_network_header: Network layer header (encapsulation)
 *	@inner_mac_header: Link layer header (encapsulation)
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
 */

struct sk_buff {
	union {
		struct {
			/* These two members must be first. */
			struct sk_buff		*next;
			struct sk_buff		*prev;

			union {
				ktime_t		tstamp;
				struct skb_mstamp skb_mstamp;
			};
		};
		struct rb_node	rbnode; /* used in netem & tcp stack */
	};
	struct sock		*sk;

	union {
		struct net_device	*dev;
		/* Some protocols might use this space to store information,
		 * while device pointer would be NULL.
		 * UDP receive path is one user.
		 */
		unsigned long		dev_scratch;
	};
	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
	void			(*destructor)(struct sk_buff *skb);
#ifdef CONFIG_XFRM
	struct	sec_path	*sp;
#endif
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	unsigned long		 _nfct;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	struct nf_bridge_info	*nf_bridge;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;

	/* Following fields are _not_ copied in __copy_skb_header()
	 * Note that queue_mapping is here mostly to fill a hole.
	 */
	kmemcheck_bitfield_begin(flags1);
	__u16			queue_mapping;

/* if you move cloned around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define CLONED_MASK	(1 << 7)
#else
#define CLONED_MASK	1
#endif
#define CLONED_OFFSET()		offsetof(struct sk_buff, __cloned_offset)

	__u8			__cloned_offset[0];
	__u8			cloned:1,
				nohdr:1,
				fclone:2,
				peeked:1,
				head_frag:1,
				xmit_more:1,
				__unused:1; /* one bit hole */
	kmemcheck_bitfield_end(flags1);

	/* fields enclosed in headers_start/headers_end are copied
	 * using a single memcpy() in __copy_skb_header()
	 */
	/* private: */
	__u32			headers_start[0];
	/* public: */

/* if you move pkt_type around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_TYPE_MAX	(7 << 5)
#else
#define PKT_TYPE_MAX	7
#endif
#define PKT_TYPE_OFFSET()	offsetof(struct sk_buff, __pkt_type_offset)

	__u8			__pkt_type_offset[0];
	__u8			pkt_type:3;
	__u8			pfmemalloc:1;
	__u8			ignore_df:1;

	__u8			nf_trace:1;
	__u8			ip_summed:2;
	__u8			ooo_okay:1;
	__u8			l4_hash:1;
	__u8			sw_hash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;

	__u8			no_fcs:1;
	/* Indicates the inner headers are valid in the skbuff. */
	__u8			encapsulation:1;
	__u8			encap_hdr_csum:1;
	__u8			csum_valid:1;
	__u8			csum_complete_sw:1;
	__u8			csum_level:2;
	__u8			csum_bad:1;

	__u8			dst_pending_confirm:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			ipvs_property:1;
	__u8			inner_protocol_type:1;
	__u8			remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
	__u8			offload_fwd_mark:1;
#endif
#ifdef CONFIG_NET_CLS_ACT
	__u8			tc_skip_classify:1;
	__u8			tc_at_ingress:1;
	__u8			tc_redirected:1;
	__u8			tc_from_ingress:1;
#endif

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#endif

	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	int			skb_iif;
	__u32			hash;
	__be16			vlan_proto;
	__u16			vlan_tci;
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
	union {
		unsigned int	napi_id;
		unsigned int	sender_cpu;
	};
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32		secmark;
#endif

	union {
		__u32		mark;
		__u32		reserved_tailroom;
	};

	union {
		__be16		inner_protocol;
		__u8		inner_ipproto;
	};

	__u16			inner_transport_header;
	__u16			inner_network_header;
	__u16			inner_mac_header;

	__be16			protocol;
	__u16			transport_header;
	__u16			network_header;
	__u16			mac_header;

	/* private: */
	__u32			headers_end[0];
	/* public: */

	/* These elements must be at the end, see alloc_skb() for details.  */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>


#define SKB_ALLOC_FCLONE	0x01
#define SKB_ALLOC_RX		0x02
#define SKB_ALLOC_NAPI		0x04

/* Returns true if the skb was allocated from PFMEMALLOC reserves */
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
	return unlikely(skb->pfmemalloc);
}

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

#define SKB_NFCT_PTRMASK	~(7UL)
/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a 
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

/**
 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * If dst entry is cached, we do not take reference and dst_release
 * will be avoided by refdst_drop. If dst entry is not cached, we take
 * reference, so that last dst_release can destroy the dst immediately.
 */
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}
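
/* Usage sketch: inside an RCU read-side section a cached dst can be attached
 * without taking a reference; a real reference must be taken (e.g. via
 * skb_dst_force()) before the skb outlives the RCU section:
 *
 *	rcu_read_lock();
 *	skb_dst_set_noref(skb, dst);
 *	// ... deliver skb while still under rcu_read_lock() ...
 *	rcu_read_unlock();
 */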

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

/* For mangling skb->pkt_type from user space side from applications
 * such as nft, tc, etc, we only allow a conservative subset of
 * possible pkt_types to be set.
 */
static inline bool skb_pkt_type_ok(u32 ptype)
{
	return ptype <= PACKET_OTHERHOST;
}

void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_tx_error(struct sk_buff *skb);
void consume_skb(struct sk_buff *skb);
void  __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize);

struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
			    int node);
struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask);

/* Layout of fast clones : [skb1][skb2][fclone_ref] */
struct sk_buff_fclones {
	struct sk_buff	skb1;

	struct sk_buff	skb2;

	atomic_t	fclone_ref;
};

/**
 *	skb_fclone_busy - check if fclone is busy
 *	@sk: socket
 *	@skb: buffer
 *
 * Returns true if skb is a fast clone, and its clone is not freed.
 * Some drivers call skb_orphan() in their ndo_start_xmit(),
 * so we also check that this didn't happen.
 */
static inline bool skb_fclone_busy(const struct sock *sk,
				   const struct sk_buff *skb)
{
	const struct sk_buff_fclones *fclones;

	fclones = container_of(skb, struct sk_buff_fclones, skb1);

	return skb->fclone == SKB_FCLONE_ORIG &&
	       atomic_read(&fclones->fclone_ref) > 1 &&
	       fclones->skb2.sk == sk;
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}

struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
static inline struct sk_buff *alloc_skb_head(gfp_t priority)
{
	return __alloc_skb_head(priority, -1);
}

struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone);
static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
					  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
}

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
				     unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
				int newtailroom, gfp_t priority);
int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
			int offset, int len);
int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
		 int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
int skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	consume_skb(a)

int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length);

int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
			 int offset, size_t size);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st);
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);

unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config);

/*
 * Packet hash types specify the type of hash in skb_set_hash.
 *
 * Hash types refer to the protocol layer addresses which are used to
 * construct a packet's hash. The hashes are used to differentiate or identify
 * flows of the protocol layer for the hash type. Hash types are either
 * layer-2 (L2), layer-3 (L3), or layer-4 (L4).
 *
 * Properties of hashes:
 *
 * 1) Two packets in different flows have different hash values
 * 2) Two packets in the same flow should have the same hash value
 *
 * A hash at a higher layer is considered to be more specific. A driver should
 * set the most specific hash possible.
 *
 * A driver cannot indicate a more specific hash than the layer at which a hash
 * was computed. For instance an L3 hash cannot be set as an L4 hash.
 *
 * A driver may indicate a hash level which is less specific than the
 * actual layer the hash was computed on. For instance, a hash computed
 * at L4 may be considered an L3 hash. This should only be done if the
 * driver can't unambiguously determine that the HW computed the hash at
 * the higher layer. Note that the "should" in the second property above
 * permits this.
 */
enum pkt_hash_types {
	PKT_HASH_TYPE_NONE,	/* Undefined type */
	PKT_HASH_TYPE_L2,	/* Input: src_MAC, dest_MAC */
	PKT_HASH_TYPE_L3,	/* Input: src_IP, dst_IP */
	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
};
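
/* Usage sketch (the descriptor fields are hypothetical): a driver reporting
 * a hardware-computed 4-tuple hash on receive with skb_set_hash(), defined
 * below:
 *
 *	if (desc->rss_valid)
 *		skb_set_hash(skb, le32_to_cpu(desc->rss_hash),
 *			     PKT_HASH_TYPE_L4);
 */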

static inline void skb_clear_hash(struct sk_buff *skb)
{
	skb->hash = 0;
	skb->sw_hash = 0;
	skb->l4_hash = 0;
}

static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
	if (!skb->l4_hash)
		skb_clear_hash(skb);
}

static inline void
__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
{
	skb->l4_hash = is_l4;
	skb->sw_hash = is_sw;
	skb->hash = hash;
}

static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{
	/* Used by drivers to set hash from HW */
	__skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
}

static inline void
__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
{
	__skb_set_hash(skb, hash, true, is_l4);
}

void __skb_get_hash(struct sk_buff *skb);
u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys *keys, int hlen);
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen_proto);

static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
					int thoff, u8 ip_proto)
{
	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count);

bool __skb_flow_dissect(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen,
			unsigned int flags);

static inline bool skb_flow_dissect(const struct sk_buff *skb,
				    struct flow_dissector *flow_dissector,
				    void *target_container, unsigned int flags)
{
	return __skb_flow_dissect(skb, flow_dissector, target_container,
				  NULL, 0, 0, 0, flags);
}

static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
					      struct flow_keys *flow,
					      unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
				  NULL, 0, 0, 0, flags);
}

static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
						  void *data, __be16 proto,
						  int nhoff, int hlen,
						  unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
				  data, proto, nhoff, hlen, flags);
}

static inline __u32 skb_get_hash(struct sk_buff *skb)
{
	if (!skb->l4_hash && !skb->sw_hash)
		__skb_get_hash(skb);

	return skb->hash;
}

__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6);

static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
{
	if (!skb->l4_hash && !skb->sw_hash) {
		struct flow_keys keys;
		__u32 hash = __get_hash_from_flowi6(fl6, &keys);

		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
	}

	return skb->hash;
}

__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl);

static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
{
	if (!skb->l4_hash && !skb->sw_hash) {
		struct flow_keys keys;
		__u32 hash = __get_hash_from_flowi4(fl4, &keys);

		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
	}

	return skb->hash;
}

__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);

static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
	return skb->hash;
}

static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
{
	to->hash = from->hash;
	to->sw_hash = from->sw_hash;
	to->l4_hash = from->l4_hash;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (const struct sk_buff *) list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic change.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_header_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 *	Note : Check if you can use __skb_header_release() instead.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	__skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Variant of skb_header_release() assuming skb is private to caller.
 *	We can avoid one atomic operation.
 */
static inline void __skb_header_release(struct sk_buff *skb)
{
	skb->nohdr = 1;
	atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
}


/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt status or with spinlocks held pri must
 *	be GFP_ATOMIC.
 *
 *	NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet thats being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);

		/* Free our shared copy */
		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->next;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_peek_next - peek skb following the given one from a queue
 *	@skb: skb to start from
 *	@list_: list to peek at
 *
 *	Returns %NULL when the end of the list is met or a pointer to the
 *	next element. The reference count is not incremented and the
 *	reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
		const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;

	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->prev;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;

}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows initializing the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or the drivers will need annotation to consolidate the
 * main types of usage into three classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists; this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}
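
/*
 * Example (an illustrative sketch): the splice helpers are handy for
 * batching under a lock. Move everything from a locked receive queue
 * onto a private list in O(1), then process it with the lock dropped:
 *
 *	__skb_queue_head_init(&tmp);
 *	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
 *	skb_queue_splice_tail_init(&sk->sk_receive_queue, &tmp);
 *	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
 *	while ((skb = __skb_dequeue(&tmp)) != NULL)
 *		... process skb ...
 */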

/**
 *	__skb_queue_after - queue a buffer after another buffer on a list
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				   struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known..
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline unsigned int skb_pagelen(const struct sk_buff *skb)
{
	unsigned int i, len = 0;

	for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len + skb_headlen(skb);
}

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to @size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	/*
	 * Propagate page pfmemalloc to the skb if we can. The problem is
	 * that not all callers have unique ownership of the page but rely
	 * on page_is_pfmemalloc doing the right thing(tm).
	 */
	frag->page.p		  = page;
	frag->page_offset	  = off;
	skb_frag_size_set(frag, size);

	page = compound_head(page);
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc	= true;
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}
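
/*
 * Example (an illustrative sketch): handing a page fragment to an skb on
 * the Rx path. skb_fill_page_desc() only sets up the fragment, so the
 * caller still owns the length and truesize accounting; drivers that
 * want that done for them can use skb_add_rx_frag() declared below:
 *
 *	skb_fill_page_desc(skb, 0, page, offset, frag_len);
 *	skb->len      += frag_len;
 *	skb->data_len += frag_len;
 *	skb->truesize += PAGE_SIZE;
 */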

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize);

#define SKB_PAGE_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb) 	BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}
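
/*
 * Example (an illustrative sketch): the usual pointer discipline when
 * building a frame in a freshly allocated skb. skb_reserve() (below)
 * leaves headroom, skb_put() appends payload at the tail and skb_push()
 * prepends a header in front of skb->data:
 *
 *	skb_reserve(skb, ETH_HLEN);
 *	memcpy(skb_put(skb, data_len), data, data_len);
 *	eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
 */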

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
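
/*
 * Example (an illustrative sketch): parsing a header from a possibly
 * non-linear skb. pskb_may_pull() guarantees that at least that many
 * bytes are linear at skb->data before they are dereferenced:
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = (struct iphdr *)skb->data;
 */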

void skb_condense(struct sk_buff *skb);

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_availroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 *	allocated by sk_stream_alloc()
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return 0;

	return skb->end - skb->tail - skb->reserved_tailroom;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

/**
 *	skb_tailroom_reserve - adjust reserved_tailroom
 *	@skb: buffer to alter
 *	@mtu: maximum amount of headlen permitted
 *	@needed_tailroom: minimum amount of reserved_tailroom
 *
 *	Set reserved_tailroom so that headlen can be as large as possible but
 *	not larger than mtu and tailroom cannot be smaller than
 *	needed_tailroom.
 *	The required headroom should already have been reserved before using
 *	this function.
 */
static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
					unsigned int needed_tailroom)
{
	SKB_LINEAR_ASSERT(skb);
	if (mtu < skb_tailroom(skb) - needed_tailroom)
		/* use at most mtu */
		skb->reserved_tailroom = skb_tailroom(skb) - mtu;
	else
		/* use up to all available space */
		skb->reserved_tailroom = needed_tailroom;
}

#define ENCAP_TYPE_ETHER	0
#define ENCAP_TYPE_IPPROTO	1

static inline void skb_set_inner_protocol(struct sk_buff *skb,
					  __be16 protocol)
{
	skb->inner_protocol = protocol;
	skb->inner_protocol_type = ENCAP_TYPE_ETHER;
}

static inline void skb_set_inner_ipproto(struct sk_buff *skb,
					 __u8 ipproto)
{
	skb->inner_ipproto = ipproto;
	skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
}
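
/*
 * Example (an illustrative sketch): a tunnel transmit path records what
 * sits inside the encapsulation before building the outer headers, so
 * that checksum offload can still locate the inner transport header:
 *
 *	skb_set_inner_ipproto(skb, IPPROTO_TCP);
 *	... push the outer UDP and IP headers ...
 */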

static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->mac_header;
	skb->inner_network_header = skb->network_header;
	skb->inner_transport_header = skb->transport_header;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

static inline unsigned char *skb_inner_transport_header(const struct sk_buff
							*skb)
{
	return skb->head + skb->inner_transport_header;
}

static inline int skb_inner_transport_offset(const struct sk_buff *skb)
{
	return skb_inner_transport_header(skb) - skb->data;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
	skb->inner_transport_header = skb->data - skb->head;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
						   const int offset)
{
	skb_reset_inner_transport_header(skb);
	skb->inner_transport_header += offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
	skb->inner_network_header = skb->data - skb->head;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
						const int offset)
{
	skb_reset_inner_network_header(skb);
	skb->inner_network_header += offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->data - skb->head;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_inner_mac_header(skb);
	skb->inner_mac_header += offset;
}
static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
	return skb->transport_header != (typeof(skb->transport_header))~0U;
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_offset(const struct sk_buff *skb)
{
	return skb_mac_header(skb) - skb->data;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != (typeof(skb->mac_header))~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}
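
/*
 * Example (an illustrative sketch): a typical receive path pins down the
 * header cursors before handing the packet up the stack, so the accessors
 * above keep working no matter how skb->data moves later:
 *
 *	skb_reset_mac_header(skb);
 *	skb_pull(skb, ETH_HLEN);
 *	skb_reset_network_header(skb);
 */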

static inline void skb_pop_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->network_header;
}

static inline void skb_probe_transport_header(struct sk_buff *skb,
					      const int offset_hint)
{
	struct flow_keys keys;

	if (skb_transport_header_was_set(skb))
		return;
	else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
		skb_set_transport_header(skb, keys.control.thoff);
	else
		skb_set_transport_header(skb, offset_hint);
}

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
{
	return skb->head + skb->csum_start;
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
{
	return skb->inner_transport_header - skb->inner_network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int skb_inner_network_offset(const struct sk_buff *skb)
{
	return skb_inner_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom; you should not reduce this.
 *
 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
 * to reduce the average number of cache lines per packet.
 * get_rps_cpus(), for example, only accesses one 64-byte aligned block:
 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb_is_nonlinear(skb))) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	__skb_set_length(skb, len);
}

void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
{
	unsigned int diff = len - skb->len;

	if (skb_tailroom(skb) < diff) {
		int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
					   GFP_ATOMIC);
		if (ret)
			return ret;
	}
	__skb_set_length(skb, len);
	return 0;
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor) {
		skb->destructor(skb);
		skb->destructor = NULL;
		skb->sk		= NULL;
	} else {
		BUG_ON(skb->sk);
	}
}

/**
 *	skb_orphan_frags - orphan the frags contained in a buffer
 *	@skb: buffer to orphan frags from
 *	@gfp_mask: allocation mask for replacement pages
 *
 *	For each frag in the SKB which needs a destructor (i.e. has an
 *	owner) create a copy of that frag and release the original
 *	page by calling the destructor.
 */
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

void skb_rbtree_purge(struct rb_root *root);

void *netdev_alloc_frag(unsigned int fragsz);

struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
				   gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}


static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
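
/*
 * Example (an illustrative sketch): a driver Rx path allocating an
 * IP-aligned buffer and copying a small received frame into it
 * (eth_type_trans() and netif_rx() live in other headers):
 *
 *	skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 *	if (!skb)
 *		goto drop;
 *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */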

static inline void skb_free_frag(void *addr)
{
	page_frag_free(addr);
}

void *napi_alloc_frag(unsigned int fragsz);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
				 unsigned int length, gfp_t gfp_mask);
static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
					     unsigned int length)
{
	return __napi_alloc_skb(napi, length, GFP_ATOMIC);
}
void napi_consume_skb(struct sk_buff *skb, int budget);

void __kfree_skb_flush(void);
void __kfree_skb_defer(struct sk_buff *skb);

/**
 * __dev_alloc_pages - allocate page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 * @order: size of the allocation
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
					     unsigned int order)
{
	/* This piece of code contains several assumptions.
	 * 1.  This is for device Rx, therefore a cold page is preferred.
	 * 2.  The expectation is the user wants a compound page.
	 * 3.  If requesting an order-0 page it will not be compound
	 *     due to the check to see if order has a value in prep_new_page
	 * 4.  __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
	 *     code in gfp_to_alloc_flags that should be enforcing this.
	 */
	gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;

	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
}

static inline struct page *dev_alloc_pages(unsigned int order)
{
	return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
}

/**
 * __dev_alloc_page - allocate a page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
{
	return __dev_alloc_pages(gfp_mask, 0);
}

static inline struct page *dev_alloc_page(void)
{
	return dev_alloc_pages(0);
}

/**
 *	skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
 *	@page: The page that was allocated from __dev_alloc_pages
 *	@skb: The skb that may need pfmemalloc set
 */
static inline void skb_propagate_pfmemalloc(struct page *page,
					     struct sk_buff *skb)
{
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc = true;
}
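
/*
 * Example (an illustrative sketch): a driver that attaches a page through
 * the low-level frag setters has to propagate pfmemalloc by hand;
 * __skb_fill_page_desc() above already does this for its callers:
 *
 *	page = __dev_alloc_page(gfp);
 *	if (page) {
 *		__skb_frag_set_page(&skb_shinfo(skb)->frags[0], page);
 *		skb_propagate_pfmemalloc(page, skb);
 *	}
 */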

/**
 * skb_frag_page - retrieve the page referred to by a paged fragment
 * @frag: the paged fragment
 *
 * Returns the &struct page associated with @frag.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->page.p;
}

/**
 * __skb_frag_ref - take an additional reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Takes an additional reference on the paged fragment @frag.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

/**
 * skb_frag_ref - take an additional reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset.
 *
 * Takes an additional reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

/**
 * __skb_frag_unref - release a reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Releases a reference on the paged fragment @frag.
 */
static inline void __skb_frag_unref(skb_frag_t *frag)
{
	put_page(skb_frag_page(frag));
}

/**
 * skb_frag_unref - release a reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset
 *
 * Releases a reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}

/**
 * skb_frag_address - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. The page must already
 * be mapped.
 */
static inline void *skb_frag_address(const skb_frag_t *frag)
{
	return page_address(skb_frag_page(frag)) + frag->page_offset;
}

/**
 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. Checks that the page
 * is mapped and returns %NULL otherwise.
 */
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));
	if (unlikely(!ptr))
		return NULL;

	return ptr + frag->page_offset;
}

/**
 * __skb_frag_set_page - sets the page contained in a paged fragment
 * @frag: the paged fragment
 * @page: the page to set
 *
 * Sets the fragment @frag to contain @page.
 */
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->page.p = page;
}

/**
 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
 * @skb: the buffer
 * @f: the fragment offset
 * @page: the page to set
 *
 * Sets the @f'th fragment of @skb to contain @page.
 */
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);

/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 *	@dir: the direction of the mapping (%DMA_*)
 *
 * Maps the page associated with @frag to @dev.
 */
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    frag->page_offset + offset, size, dir);
}
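
/*
 * Example (an illustrative sketch): mapping every fragment of an skb for
 * transmit. Checking dma_mapping_error() and unwinding earlier mappings
 * is omitted for brevity:
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 *		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *
 *		dma = skb_frag_dma_map(dev, frag, 0,
 *				       skb_frag_size(frag), DMA_TO_DEVICE);
 *	}
 */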

static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
					gfp_t gfp_mask)
{
	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}


static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
						  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
}


/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not requires the data to be copied.
 *	does not require the data to be copied.
static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int skb_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
	       pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
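
/*
 * Example (an illustrative sketch): making room for a VLAN tag before
 * pushing it. Only the header part has to be private, so skb_cow_head()
 * is sufficient and cheaper than a full skb_cow():
 *
 *	if (skb_cow_head(skb, VLAN_HLEN))
 *		goto drop;
 *	... skb_push(skb, VLAN_HLEN) and write the tag ...
 */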

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

/**
 *	skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;

	if (unlikely(size < len)) {
		len -= size;
		if (skb_pad(skb, len))
			return -ENOMEM;
		__skb_put(skb, len);
	}
	return 0;
}
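
/*
 * Example (an illustrative sketch): Ethernet drivers commonly pad short
 * frames to the minimum wire size on transmit; the skb is already freed
 * when this fails, so only the error path differs:
 *
 *	if (skb_put_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;
 */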

static inline int skb_add_data(struct sk_buff *skb,
			       struct iov_iter *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		__wsum csum = 0;
		if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
					         &csum, from)) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
				    const struct page *page, int off)
{
	if (i) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == frag->page_offset + skb_frag_size(frag);
	}
	return false;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 * skb_has_shared_frag - can any frag be overwritten
 * @skb: buffer to test
 *
 * Return true if the skb has at least one frag that might be modified
 * by an external entity (as in vmsplice()/sendfile())
 */
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) &&
	       skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

static __always_inline void
__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
		     unsigned int off)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_block_sub(skb->csum,
					   csum_partial(start, len, 0), off);
	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) < 0)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	__skb_postpull_rcsum(skb, start, len, 0);
}
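
/*
 * Example (an illustrative sketch): keeping a CHECKSUM_COMPLETE value
 * honest while peeling an outer header off a received packet:
 *
 *	start = skb->data;
 *	__skb_pull(skb, hdr_len);
 *	skb_postpull_rcsum(skb, start, hdr_len);
 *
 * skb_pull_rcsum() declared further down combines the two steps.
 */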

static __always_inline void
__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
		     unsigned int off)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_block_add(skb->csum,
					   csum_partial(start, len, 0), off);
}

/**
 *	skb_postpush_rcsum - update checksum for received skb after push
 *	@skb: buffer to update
 *	@start: start of data after push
 *	@len: length of data pushed
 *
 *	After doing a push on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum.
 */
static inline void skb_postpush_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	__skb_postpush_rcsum(skb, start, len, 0);
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	skb_push_rcsum - push skb and update receive checksum
 *	@skb: buffer to update
 *	@len: length of data pushed
 *
 *	This function performs an skb_push on the packet and updates
 *	the CHECKSUM_COMPLETE checksum.  It should be used on
 *	receive path processing instead of skb_push unless you know
 *	that the checksum difference is zero (e.g., a valid IP header)
 *	or you are setting ip_summed to CHECKSUM_NONE.
 */
static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
					    unsigned int len)
{
	skb_push(skb, len);
	skb_postpush_rcsum(skb, skb->data, len);
	return skb->data;
}

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets are still valid after the operation.
 */

static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	__skb_trim(skb, len);
	return 0;
}

static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __skb_grow(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     skb != (struct sk_buff *)(queue);				\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)					\
		for (skb = (queue)->next, tmp = skb->next;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)						\
		for (; skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_queue_walk_from_safe(queue, skb, tmp)				\
		for (tmp = skb->next;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     skb != (struct sk_buff *)(queue);				\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->prev, tmp = skb->prev;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->prev;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->prev)
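
/*
 * Example (an illustrative sketch, with some_match() standing in for a
 * caller-supplied predicate): the walk macros expand to plain for loops,
 * so use a _safe variant whenever the current skb may be unlinked inside
 * the body. The queue lock must be held for the whole walk:
 *
 *	skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) {
 *		if (some_match(skb)) {
 *			__skb_unlink(skb, &sk->sk_receive_queue);
 *			kfree_skb(skb);
 *		}
 *	}
 */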

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)


int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
				const struct sk_buff *skb);
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
					  struct sk_buff_head *queue,
					  unsigned int flags,
					  void (*destructor)(struct sock *sk,
							   struct sk_buff *skb),
					  int *peeked, int *off, int *err,
					  struct sk_buff **last);
struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
					void (*destructor)(struct sock *sk,
							   struct sk_buff *skb),
					int *peeked, int *off, int *err,
					struct sk_buff **last);
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
				    void (*destructor)(struct sock *sk,
						       struct sk_buff *skb),
				    int *peeked, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
				  int *err);
unsigned int datagram_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
			   struct iov_iter *to, int size);
static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
					struct msghdr *msg, int size)
{
	return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
}
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
				   struct msghdr *msg);
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				 struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
static inline void skb_free_datagram_locked(struct sock *sk,
					    struct sk_buff *skb)
{
	__skb_free_datagram_locked(sk, skb, 0);
}
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
			      int len, __wsum csum);
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int len,
		    unsigned int flags);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
		 int len, int hlen);
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
			     gfp_t gfp);

static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
	return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
}

static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
{
	return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}

struct skb_checksum_ops {
	__wsum (*update)(const void *mem, int len, __wsum wsum);
	__wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
};

__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
		      __wsum csum, const struct skb_checksum_ops *ops);
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
		    __wsum csum);

static inline void * __must_check
__skb_header_pointer(const struct sk_buff *skb, int offset,
		     int len, void *data, int hlen, void *buffer)
{
	if (hlen - offset >= len)
		return data + offset;

	if (!skb ||
	    skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}

static inline void * __must_check
skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
{
	return __skb_header_pointer(skb, offset, len, skb->data,
				    skb_headlen(skb), buffer);
}
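
/*
 * Example (an illustrative sketch): reading a UDP header that may be
 * split across fragments. The on-stack copy is only used when the header
 * is not already linear in the skb:
 *
 *	struct udphdr _uh, *uh;
 *
 *	uh = skb_header_pointer(skb, offset, sizeof(_uh), &_uh);
 *	if (!uh)
 *		goto drop;
 */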

/**
 *	skb_needs_linearize - check if we need to linearize a given skb
 *			      depending on the given device features.
 *	@skb: socket buffer to check
 *	@features: net device features
 *
 *	Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
static inline bool skb_needs_linearize(struct sk_buff *skb,
				       netdev_features_t features)
{
	return skb_is_nonlinear(skb) &&
	       ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
}

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as a ktime_t. This function
 *	converts the timestamp to a struct timeval and stores it in
 *	stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return 0;
}

struct sk_buff *skb_clone_sk(struct sk_buff *skb);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

void skb_clone_tx_timestamp(struct sk_buff *skb);
bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack with a
 * timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 *
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

void __skb_tstamp_tx(struct sk_buff *orig_skb,
		     struct skb_shared_hwtstamps *hwtstamps,
		     struct sock *sk, int tstype);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non-NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps);

static inline void sw_tx_timestamp(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		skb_tstamp_tx(skb, NULL);
}

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_start_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * Specifically, one should make absolutely sure that this function is
 * called before TX completion of this packet can trigger.  Otherwise
 * the packet could potentially already be freed.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	sw_tx_timestamp(skb);
}
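
/* Illustrative sketch (not part of this header): a hypothetical driver's
 * ndo_start_xmit() calls the hook after the packet can no longer fail to
 * be queued, but before the hardware may complete (and free) it. All
 * foo_* names are made up.
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		foo_fill_tx_descriptor(priv, skb);
 *		skb_tx_timestamp(skb);
 *		foo_ring_tx_doorbell(priv);
 *		return NETDEV_TX_OK;
 *	}
 */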

/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 *
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
__sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
		skb->csum_valid ||
		(skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) >= 0));
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify the checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is correct.  In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}
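
/* Illustrative sketch (not part of this header): a receive path that has
 * already seeded skb->csum with the pseudo-header sum can verify the
 * whole packet in one call; a non-zero result means a bad checksum.
 *
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;
 */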

static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level == 0)
			skb->ip_summed = CHECKSUM_NONE;
		else
			skb->csum_level--;
	}
}

static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
			skb->csum_level++;
	} else if (skb->ip_summed == CHECKSUM_NONE) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = 0;
	}
}
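
/* Illustrative sketch (not part of this header): csum_level counts how
 * many consecutive checksums the device has validated beyond the first.
 * A path that consumes the outer, already-validated checksum of an
 * encapsulated packet pops one level:
 *
 *	__skb_decr_checksum_unnecessary(skb);
 *
 * while a path that has just validated one more inner checksum pushes
 * one:
 *
 *	__skb_incr_checksum_unnecessary(skb);
 */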

static inline void __skb_mark_checksum_bad(struct sk_buff *skb)
{
	/* Mark current checksum as bad (typically called from GRO
	 * path). In the case that ip_summed is CHECKSUM_NONE
	 * this must be the first checksum encountered in the packet.
	 * When ip_summed is CHECKSUM_UNNECESSARY, this is the first
	 * checksum after the last one validated. For UDP, a zero
	 * checksum can not be marked as bad.
	 */

	if (skb->ip_summed == CHECKSUM_NONE ||
	    skb->ip_summed == CHECKSUM_UNNECESSARY)
		skb->csum_bad = 1;
}

/* Check if we need to perform checksum complete validation.
 *
 * Returns true if checksum complete is needed, false otherwise
 * (either checksum is unnecessary or zero checksum is allowed).
 */
static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
						  bool zero_okay,
						  __sum16 check)
{
	if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
		skb->csum_valid = 1;
		__skb_decr_checksum_unnecessary(skb);
		return false;
	}

	return true;
}

/* For small packets <= CHECKSUM_BREAK perform checksum complete directly
 * in checksum_init.
 */
#define CHECKSUM_BREAK 76

/* Unset checksum-complete
 *
 * Unsetting checksum-complete is appropriate when a packet is being
 * modified (decompressed, for instance) and the checksum-complete value
 * is thereby invalidated.
 */
static inline void skb_checksum_complete_unset(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/* Validate (init) checksum based on checksum complete.
 *
 * Return values:
 *   0: checksum is validated, or an attempt will be made later in
 *	__skb_checksum_complete. In the latter case ip_summed will not be
 *	CHECKSUM_UNNECESSARY and the pseudo checksum is stored in skb->csum
 *	for use in __skb_checksum_complete
 *   non-zero: value of invalid checksum
 *
 */
static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
						       bool complete,
						       __wsum psum)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_fold(csum_add(psum, skb->csum))) {
			skb->csum_valid = 1;
			return 0;
		}
	} else if (skb->csum_bad) {
		/* ip_summed == CHECKSUM_NONE in this case */
		return (__force __sum16)1;
	}

	skb->csum = psum;

	if (complete || skb->len <= CHECKSUM_BREAK) {
		__sum16 csum;

		csum = __skb_checksum_complete(skb);
		skb->csum_valid = !csum;
		return csum;
	}

	return 0;
}

static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
{
	return 0;
}

/* Perform checksum validate (init). Note that this is a macro since we only
 * want to call the function that computes the pseudo header when it is
 * actually needed. First we try to validate without any computation
 * (checksum unnecessary) and then fall back to checksum-complete
 * validation, calling the function to compute the pseudo header.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
#define __skb_checksum_validate(skb, proto, complete,			\
				zero_okay, check, compute_pseudo)	\
({									\
	__sum16 __ret = 0;						\
	skb->csum_valid = 0;						\
	if (__skb_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_checksum_validate_complete(skb,		\
				complete, compute_pseudo(skb, proto));	\
	__ret;								\
})

#define skb_checksum_init(skb, proto, compute_pseudo)			\
	__skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)

#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo)	\
	__skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)

#define skb_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)

#define skb_checksum_validate_zero_check(skb, proto, check,		\
					 compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)

#define skb_checksum_simple_validate(skb)				\
	__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
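
/* Illustrative sketch (not part of this header): UDP over IPv4 validates
 * with a pseudo-header compute function and allows a zero checksum,
 * roughly as the kernel's udp4_csum_init() does:
 *
 *	err = skb_checksum_init_zero_check(skb, IPPROTO_UDP, uh->check,
 *					   inet_compute_pseudo);
 *
 * A zero UDP checksum means "no checksum", hence the zero_okay variant;
 * a non-zero return value is the invalid checksum.
 */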

static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
{
	return (skb->ip_summed == CHECKSUM_NONE &&
		skb->csum_valid && !skb->csum_bad);
}

static inline void __skb_checksum_convert(struct sk_buff *skb,
					  __sum16 check, __wsum pseudo)
{
	skb->csum = ~pseudo;
	skb->ip_summed = CHECKSUM_COMPLETE;
}

#define skb_checksum_try_convert(skb, proto, check, compute_pseudo)	\
do {									\
	if (__skb_checksum_convert_check(skb))				\
		__skb_checksum_convert(skb, check,			\
				       compute_pseudo(skb, proto));	\
} while (0)

static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
					      u16 start, u16 offset)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
	skb->csum_offset = offset - start;
}

/* Update skb and packet to reflect the remote checksum offload operation.
 * When called, ptr indicates the starting point for skb->csum when
 * ip_summed is CHECKSUM_COMPLETE. If we need to create checksum complete
 * here, skb_postpull_rcsum is done so that skb->csum starts at ptr.
 */
static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
				       int start, int offset, bool nopartial)
{
	__wsum delta;

	if (!nopartial) {
		skb_remcsum_adjust_partial(skb, ptr, start, offset);
		return;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
		__skb_checksum_complete(skb);
		skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
	}

	delta = remcsum_adjust(ptr, skb->csum, start, offset);

	/* Adjust skb->csum since we changed the packet */
	skb->csum = csum_add(skb->csum, delta);
}

static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return (void *)(skb->_nfct & SKB_NFCT_PTRMASK);
#else
	return NULL;
#endif
}

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb_nfct(skb));
	skb->_nfct = 0;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

static inline void nf_reset_trace(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	skb->nf_trace = 0;
#endif
}

/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
			     bool copy)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->_nfct = src->_nfct;
	nf_conntrack_get(skb_nfct(src));
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	dst->nf_bridge  = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	if (copy)
		dst->nf_trace = src->nf_trace;
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb_nfct(dst));
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src, true);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline bool skb_irq_freeable(const struct sk_buff *skb)
{
	return !skb->destructor &&
#if IS_ENABLED(CONFIG_XFRM)
		!skb->sp &&
#endif
		!skb_nfct(skb) &&
		!skb->_skb_refdst &&
		!skb_has_frag_list(skb);
}

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}
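
/* Illustrative sketch (not part of this header): the +1/-1 bias above
 * lets queue_mapping == 0 mean "not recorded". A multiqueue driver's
 * receive path records the ring index so later stages can read it back:
 *
 *	skb_record_rx_queue(skb, ring_index);
 *	...
 *	if (skb_rx_queue_recorded(skb))
 *		rxq = skb_get_rx_queue(skb);
 */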

static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
{
	skb->dst_pending_confirm = val;
}

static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
{
	return skb->dst_pending_confirm != 0;
}

static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb->sp;
#else
	return NULL;
#endif
}

/* Keeps track of mac header offset relative to skb->head.
 * It is useful for TSO of tunneling protocols, e.g. GRE.
 * For a non-tunnel skb it points to skb_mac_header() and for
 * a tunnel skb it points to the outer mac header.
 * Also keeps track of the level of encapsulation of network headers.
 */
struct skb_gso_cb {
	union {
		int	mac_offset;
		int	data_offset;
	};
	int	encap_level;
	__wsum	csum;
	__u16	csum_start;
};
#define SKB_SGO_CB_OFFSET	32
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))

static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
	return (skb_mac_header(inner_skb) - inner_skb->head) -
		SKB_GSO_CB(inner_skb)->mac_offset;
}

static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
{
	int new_headroom, headroom;
	int ret;

	headroom = skb_headroom(skb);
	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
	if (ret)
		return ret;

	new_headroom = skb_headroom(skb);
	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
	return 0;
}

static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
{
	/* Do not update partial checksums if remote checksum is enabled. */
	if (skb->remcsum_offload)
		return;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
}

/* Compute the checksum for a gso segment. First compute the checksum value
 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
 * then add in skb->csum (checksum from csum_start to end of packet).
 * skb->csum and csum_start are then updated to reflect the checksum of the
 * resultant packet starting from the transport header -- the resultant
 * checksum is in the res argument (i.e. normally zero or the bitwise
 * complement of the checksum of a pseudo header).
 */
static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
	unsigned char *csum_start = skb_transport_header(skb);
	int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
	__wsum partial = SKB_GSO_CB(skb)->csum;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;

	return csum_fold(csum_partial(csum_start, plen, partial));
}

static inline bool skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

static inline void skb_gso_reset(struct sk_buff *skb)
{
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_segs = 0;
	skb_shinfo(skb)->gso_type = 0;
}

void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16(*skb_chkf)(struct sk_buff *skb));

/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs build around a head frag can be removed if they are
 * not cloned.  This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
	return !skb->head_frag || skb_cloned(skb);
}

/**
 * skb_gso_network_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_network_seglen is used to determine the real size of the
 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
 *
 * The MAC/L2 header is not accounted for.
 */
static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) -
			       skb_network_header(skb);
	return hdr_len + skb_gso_transport_seglen(skb);
}
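
/* Illustrative sketch (not part of this header): forwarding code checks
 * the per-segment length rather than skb->len against the egress MTU,
 * since each segment travels as its own packet:
 *
 *	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) > mtu)
 *		...send ICMP_FRAG_NEEDED and drop...
 */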

/* Local Checksum Offload.
 * Compute outer checksum based on the assumption that the
 * inner checksum will be offloaded later.
 * See Documentation/networking/checksum-offloads.txt for
 * explanation of how this works.
 * Fill in outer checksum adjustment (e.g. with sum of outer
 * pseudo-header) before calling.
 * Also ensure that inner checksum is in linear data area.
 */
static inline __wsum lco_csum(struct sk_buff *skb)
{
	unsigned char *csum_start = skb_checksum_start(skb);
	unsigned char *l4_hdr = skb_transport_header(skb);
	__wsum partial;

	/* Start with complement of inner checksum adjustment */
	partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
						    skb->csum_offset));

	/* Add in checksum of our headers (incl. outer checksum
	 * adjustment filled in by caller) and return result.
	 */
	return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
}
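
/* Illustrative sketch (not part of this header): a UDP tunnel computing
 * the outer UDP checksum via LCO, roughly as the kernel's udp_set_csum()
 * does. The outer pseudo-header adjustment is written first, then folded
 * together with the header bytes by lco_csum():
 *
 *	uh->check = ~udp_v4_check(len, saddr, daddr, 0);
 *	uh->check = csum_fold(lco_csum(skb));
 *	if (uh->check == 0)
 *		uh->check = CSUM_MANGLED_0;
 */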

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */