diff --git a/arch/x86_64/kernel/functionlist b/arch/x86_64/kernel/functionlist
index 01fa23580c85f4dd707f166dca06f49226924078..7ae18ec12454e05ef4e45c56bb685a42d61559ca 100644
--- a/arch/x86_64/kernel/functionlist
+++ b/arch/x86_64/kernel/functionlist
@@ -514,7 +514,6 @@
 *(.text.dentry_open)
 *(.text.dentry_iput)
 *(.text.bio_alloc)
-*(.text.alloc_skb_from_cache)
 *(.text.wait_on_page_bit)
 *(.text.vfs_readdir)
 *(.text.vfs_lstat)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 82f43ad478c7848be70cefa382c123956b7db562..5992f65b418423a7e28dd70636a1d2bc12a23ed2 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -346,9 +346,6 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
         return __alloc_skb(size, priority, 1, -1);
 }
 
-extern struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
-                                            unsigned int size,
-                                            gfp_t priority);
 extern void kfree_skbmem(struct sk_buff *skb);
 extern struct sk_buff *skb_clone(struct sk_buff *skb,
                                  gfp_t priority);
@@ -622,6 +619,13 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
         list->qlen = 0;
 }
 
+static inline void skb_queue_head_init_class(struct sk_buff_head *list,
+                struct lock_class_key *class)
+{
+        skb_queue_head_init(list);
+        lockdep_set_class(&list->lock, class);
+}
+
 /*
  *      Insert an sk_buff at the start of a list.
  *
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 58d13f2bd121471eb5ee8adf90e5b4ec2dcd1530..a285897a2fb462ada2b81b10dc7c0bbc9286c8d4 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -126,7 +126,9 @@ void br_stp_disable_port(struct net_bridge_port *p)
 /* called under bridge lock */
 void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
 {
-        unsigned char oldaddr[6];
+        /* should be aligned on 2 bytes for compare_ether_addr() */
+        unsigned short oldaddr_aligned[ETH_ALEN >> 1];
+        unsigned char *oldaddr = (unsigned char *)oldaddr_aligned;
         struct net_bridge_port *p;
         int wasroot;
 
@@ -151,11 +153,14 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
                 br_become_root_bridge(br);
 }
 
-static const unsigned char br_mac_zero[6];
+/* should be aligned on 2 bytes for compare_ether_addr() */
+static const unsigned short br_mac_zero_aligned[ETH_ALEN >> 1];
 
 /* called under bridge lock */
 void br_stp_recalculate_bridge_id(struct net_bridge *br)
 {
+        const unsigned char *br_mac_zero =
+                        (const unsigned char *)br_mac_zero_aligned;
         const unsigned char *addr = br_mac_zero;
         struct net_bridge_port *p;
 
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index cfc60019cf920508cc9c416d950c147512c73766..841e3f32cab181c51ec5d50605e3f7560add79ae 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1331,6 +1331,8 @@ void neigh_parms_destroy(struct neigh_parms *parms)
         kfree(parms);
 }
 
+static struct lock_class_key neigh_table_proxy_queue_class;
+
 void neigh_table_init_no_netlink(struct neigh_table *tbl)
 {
         unsigned long now = jiffies;
@@ -1379,7 +1381,8 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
         init_timer(&tbl->proxy_timer);
         tbl->proxy_timer.data = (unsigned long)tbl;
         tbl->proxy_timer.function = neigh_proxy_process;
-        skb_queue_head_init(&tbl->proxy_queue);
+        skb_queue_head_init_class(&tbl->proxy_queue,
+                        &neigh_table_proxy_queue_class);
 
         tbl->last_flush = now;
         tbl->last_rand = now + tbl->parms.reachable_time * 20;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index da1019451ccbe68185af1db8b7db3bb30a305c8f..4581ece48bb24e1308854525306ce4a86f92c56e 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -471,6 +471,13 @@ int __netpoll_rx(struct sk_buff *skb)
         if (skb->len < len || len < iph->ihl*4)
                 goto out;
 
+        /*
+         * Our transport medium may have padded the buffer out.
+         * Now we trim to the true length of the frame.
+         */
+        if (pskb_trim_rcsum(skb, len))
+                goto out;
+
         if (iph->protocol != IPPROTO_UDP)
                 goto out;
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 87573ae35b025c6d4d971ff0b65cbf8931d1b973..336958fbbcb2bc87b5391240846d21260fa72765 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -196,61 +196,6 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
         goto out;
 }
 
-/**
- * alloc_skb_from_cache - allocate a network buffer
- * @cp: kmem_cache from which to allocate the data area
- *      (object size must be big enough for @size bytes + skb overheads)
- * @size: size to allocate
- * @gfp_mask: allocation mask
- *
- * Allocate a new &sk_buff. The returned buffer has no headroom and
- * tail room of size bytes. The object has a reference count of one.
- * The return is the buffer. On a failure the return is %NULL.
- *
- * Buffers may only be allocated from interrupts using a @gfp_mask of
- * %GFP_ATOMIC.
- */
-struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
-                                     unsigned int size,
-                                     gfp_t gfp_mask)
-{
-        struct sk_buff *skb;
-        u8 *data;
-
-        /* Get the HEAD */
-        skb = kmem_cache_alloc(skbuff_head_cache,
-                               gfp_mask & ~__GFP_DMA);
-        if (!skb)
-                goto out;
-
-        /* Get the DATA. */
-        size = SKB_DATA_ALIGN(size);
-        data = kmem_cache_alloc(cp, gfp_mask);
-        if (!data)
-                goto nodata;
-
-        memset(skb, 0, offsetof(struct sk_buff, truesize));
-        skb->truesize = size + sizeof(struct sk_buff);
-        atomic_set(&skb->users, 1);
-        skb->head = data;
-        skb->data = data;
-        skb->tail = data;
-        skb->end = data + size;
-
-        atomic_set(&(skb_shinfo(skb)->dataref), 1);
-        skb_shinfo(skb)->nr_frags = 0;
-        skb_shinfo(skb)->gso_size = 0;
-        skb_shinfo(skb)->gso_segs = 0;
-        skb_shinfo(skb)->gso_type = 0;
-        skb_shinfo(skb)->frag_list = NULL;
-out:
-        return skb;
-nodata:
-        kmem_cache_free(skbuff_head_cache, skb);
-        skb = NULL;
-        goto out;
-}
-
 /**
  * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
  * @dev: network device to receive on
diff --git a/net/key/af_key.c b/net/key/af_key.c
index a4e7e2db0ff35402bbc722d6e977811689e8eeeb..cf77930ee51637c145afad1853e7bc97fbad7ee0 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -630,6 +630,35 @@ pfkey_sockaddr_size(sa_family_t family)
         /* NOTREACHED */
 }
 
+static inline int pfkey_mode_from_xfrm(int mode)
+{
+        switch(mode) {
+        case XFRM_MODE_TRANSPORT:
+                return IPSEC_MODE_TRANSPORT;
+        case XFRM_MODE_TUNNEL:
+                return IPSEC_MODE_TUNNEL;
+        case XFRM_MODE_BEET:
+                return IPSEC_MODE_BEET;
+        default:
+                return -1;
+        }
+}
+
+static inline int pfkey_mode_to_xfrm(int mode)
+{
+        switch(mode) {
+        case IPSEC_MODE_ANY: /*XXX*/
+        case IPSEC_MODE_TRANSPORT:
+                return XFRM_MODE_TRANSPORT;
+        case IPSEC_MODE_TUNNEL:
+                return XFRM_MODE_TUNNEL;
+        case IPSEC_MODE_BEET:
+                return XFRM_MODE_BEET;
+        default:
+                return -1;
+        }
+}
+
 static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys, int hsc)
 {
         struct sk_buff *skb;
@@ -651,6 +680,7 @@ static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys,
         int encrypt_key_size = 0;
         int sockaddr_size;
         struct xfrm_encap_tmpl *natt = NULL;
+        int mode;
 
         /* address family check */
         sockaddr_size = pfkey_sockaddr_size(x->props.family);
@@ -928,7 +958,11 @@ static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys,
         sa2 = (struct sadb_x_sa2 *) skb_put(skb, sizeof(struct sadb_x_sa2));
         sa2->sadb_x_sa2_len = sizeof(struct sadb_x_sa2)/sizeof(uint64_t);
         sa2->sadb_x_sa2_exttype = SADB_X_EXT_SA2;
-        sa2->sadb_x_sa2_mode = x->props.mode + 1;
+        if ((mode = pfkey_mode_from_xfrm(x->props.mode)) < 0) {
+                kfree_skb(skb);
+                return ERR_PTR(-EINVAL);
+        }
+        sa2->sadb_x_sa2_mode = mode;
         sa2->sadb_x_sa2_reserved1 = 0;
         sa2->sadb_x_sa2_reserved2 = 0;
         sa2->sadb_x_sa2_sequence = 0;
@@ -1155,9 +1189,12 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct sadb_msg *hdr,
 
         if (ext_hdrs[SADB_X_EXT_SA2-1]) {
                 struct sadb_x_sa2 *sa2 = (void*)ext_hdrs[SADB_X_EXT_SA2-1];
-                x->props.mode = sa2->sadb_x_sa2_mode;
-                if (x->props.mode)
-                        x->props.mode--;
+                int mode = pfkey_mode_to_xfrm(sa2->sadb_x_sa2_mode);
+                if (mode < 0) {
+                        err = -EINVAL;
+                        goto out;
+                }
+                x->props.mode = mode;
                 x->props.reqid = sa2->sadb_x_sa2_reqid;
         }
 
@@ -1218,7 +1255,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
         struct sadb_address *saddr, *daddr;
         struct sadb_msg *out_hdr;
         struct xfrm_state *x = NULL;
-        u8 mode;
+        int mode;
         u32 reqid;
         u8 proto;
         unsigned short family;
@@ -1233,7 +1270,9 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
                 return -EINVAL;
 
         if ((sa2 = ext_hdrs[SADB_X_EXT_SA2-1]) != NULL) {
-                mode = sa2->sadb_x_sa2_mode - 1;
+                mode = pfkey_mode_to_xfrm(sa2->sadb_x_sa2_mode);
+                if (mode < 0)
+                        return -EINVAL;
                 reqid = sa2->sadb_x_sa2_reqid;
         } else {
                 mode = 0;
@@ -1756,6 +1795,7 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
         struct sockaddr_in6 *sin6;
 #endif
+        int mode;
 
         if (xp->xfrm_nr >= XFRM_MAX_DEPTH)
                 return -ELOOP;
@@ -1764,7 +1804,9 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
                 return -EINVAL;
 
         t->id.proto = rq->sadb_x_ipsecrequest_proto; /* XXX check proto */
-        t->mode = rq->sadb_x_ipsecrequest_mode-1;
+        if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0)
+                return -EINVAL;
+        t->mode = mode;
         if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE)
                 t->optional = 1;
         else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) {
@@ -1877,7 +1919,7 @@ static struct sk_buff * pfkey_xfrm_policy2msg_prep(struct xfrm_policy *xp)
         return skb;
 }
 
-static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, int dir)
+static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, int dir)
 {
         struct sadb_msg *hdr;
         struct sadb_address *addr;
@@ -2014,6 +2056,7 @@ static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, i
                 struct sadb_x_ipsecrequest *rq;
                 struct xfrm_tmpl *t = xp->xfrm_vec + i;
                 int req_size;
+                int mode;
 
                 req_size = sizeof(struct sadb_x_ipsecrequest);
                 if (t->mode == XFRM_MODE_TUNNEL)
@@ -2027,7 +2070,9 @@ static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, i
                 memset(rq, 0, sizeof(*rq));
                 rq->sadb_x_ipsecrequest_len = req_size;
                 rq->sadb_x_ipsecrequest_proto = t->id.proto;
-                rq->sadb_x_ipsecrequest_mode = t->mode+1;
+                if ((mode = pfkey_mode_from_xfrm(t->mode)) < 0)
+                        return -EINVAL;
+                rq->sadb_x_ipsecrequest_mode = mode;
                 rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_REQUIRE;
                 if (t->reqid)
                         rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_UNIQUE;
@@ -2089,6 +2134,8 @@ static void pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, i
 
         hdr->sadb_msg_len = size / sizeof(uint64_t);
         hdr->sadb_msg_reserved = atomic_read(&xp->refcnt);
+
+        return 0;
 }
 
 static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
@@ -2102,7 +2149,9 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
                 err = PTR_ERR(out_skb);
                 goto out;
         }
-        pfkey_xfrm_policy2msg(out_skb, xp, dir);
+        err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
+        if (err < 0)
+                return err;
 
         out_hdr = (struct sadb_msg *) out_skb->data;
         out_hdr->sadb_msg_version = PF_KEY_V2;
@@ -2327,7 +2376,9 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, struct sadb
                 err = PTR_ERR(out_skb);
                 goto out;
         }
-        pfkey_xfrm_policy2msg(out_skb, xp, dir);
+        err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
+        if (err < 0)
+                goto out;
 
         out_hdr = (struct sadb_msg *) out_skb->data;
         out_hdr->sadb_msg_version = hdr->sadb_msg_version;
@@ -2409,6 +2460,7 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
 {
         int err;
         struct sadb_x_ipsecrequest *rq2;
+        int mode;
 
         if (len <= sizeof(struct sadb_x_ipsecrequest) ||
             len < rq1->sadb_x_ipsecrequest_len)
@@ -2439,7 +2491,9 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
                 return -EINVAL;
 
         m->proto = rq1->sadb_x_ipsecrequest_proto;
-        m->mode = rq1->sadb_x_ipsecrequest_mode - 1;
+        if ((mode = pfkey_mode_to_xfrm(rq1->sadb_x_ipsecrequest_mode)) < 0)
+                return -EINVAL;
+        m->mode = mode;
         m->reqid = rq1->sadb_x_ipsecrequest_reqid;
 
         return ((int)(rq1->sadb_x_ipsecrequest_len +
@@ -2579,12 +2633,15 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
         struct pfkey_dump_data *data = ptr;
         struct sk_buff *out_skb;
         struct sadb_msg *out_hdr;
+        int err;
 
         out_skb = pfkey_xfrm_policy2msg_prep(xp);
         if (IS_ERR(out_skb))
                 return PTR_ERR(out_skb);
 
-        pfkey_xfrm_policy2msg(out_skb, xp, dir);
+        err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
+        if (err < 0)
+                return err;
 
         out_hdr = (struct sadb_msg *) out_skb->data;
         out_hdr->sadb_msg_version = data->hdr->sadb_msg_version;
@@ -3513,7 +3570,10 @@ static int pfkey_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
 
         for (i = 0, mp = m; i < num_bundles; i++, mp++) {
                 /* old ipsecrequest */
-                if (set_ipsecrequest(skb, mp->proto, mp->mode + 1,
+                int mode = pfkey_mode_from_xfrm(mp->mode);
+                if (mode < 0)
+                        return -EINVAL;
+                if (set_ipsecrequest(skb, mp->proto, mode,
                                      (mp->reqid ? IPSEC_LEVEL_UNIQUE : IPSEC_LEVEL_REQUIRE),
                                      mp->reqid, mp->old_family,
                                      &mp->old_saddr, &mp->old_daddr) < 0) {
@@ -3521,7 +3581,7 @@ static int pfkey_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
                 }
 
                 /* new ipsecrequest */
-                if (set_ipsecrequest(skb, mp->proto, mp->mode + 1,
+                if (set_ipsecrequest(skb, mp->proto, mode,
                                      (mp->reqid ? IPSEC_LEVEL_UNIQUE : IPSEC_LEVEL_REQUIRE),
                                      mp->reqid, mp->new_family,
                                      &mp->new_saddr, &mp->new_daddr) < 0) {
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 536298c2eda26034d3f0afab142ad95cbcee2e7d..a1d026f12b0ed85c68715f4fc861abb081334e8d 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -627,6 +627,12 @@ int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
                         retval = -EINVAL;
                         goto err_bindx_rem;
                 }
+
+                if (!af->addr_valid(sa_addr, sp, NULL)) {
+                        retval = -EADDRNOTAVAIL;
+                        goto err_bindx_rem;
+                }
+
                 if (sa_addr->v4.sin_port != htons(bp->port)) {
                         retval = -EINVAL;
                         goto err_bindx_rem;
@@ -5638,6 +5644,36 @@ void sctp_wait_for_close(struct sock *sk, long timeout)
         finish_wait(sk->sk_sleep, &wait);
 }
 
+static void sctp_sock_rfree_frag(struct sk_buff *skb)
+{
+        struct sk_buff *frag;
+
+        if (!skb->data_len)
+                goto done;
+
+        /* Don't forget the fragments. */
+        for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next)
+                sctp_sock_rfree_frag(frag);
+
+done:
+        sctp_sock_rfree(skb);
+}
+
+static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
+{
+        struct sk_buff *frag;
+
+        if (!skb->data_len)
+                goto done;
+
+        /* Don't forget the fragments. */
+        for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next)
+                sctp_skb_set_owner_r_frag(frag, sk);
+
+done:
+        sctp_skb_set_owner_r(skb, sk);
+}
+
 /* Populate the fields of the newsk from the oldsk and migrate the assoc
  * and its messages to the newsk.
  */
@@ -5692,10 +5728,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
         sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
                 event = sctp_skb2event(skb);
                 if (event->asoc == assoc) {
-                        sctp_sock_rfree(skb);
+                        sctp_sock_rfree_frag(skb);
                         __skb_unlink(skb, &oldsk->sk_receive_queue);
                         __skb_queue_tail(&newsk->sk_receive_queue, skb);
-                        sctp_skb_set_owner_r(skb, newsk);
+                        sctp_skb_set_owner_r_frag(skb, newsk);
                 }
         }
 
@@ -5723,10 +5759,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
                 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
                         event = sctp_skb2event(skb);
                         if (event->asoc == assoc) {
-                                sctp_sock_rfree(skb);
+                                sctp_sock_rfree_frag(skb);
                                 __skb_unlink(skb, &oldsp->pd_lobby);
                                 __skb_queue_tail(queue, skb);
-                                sctp_skb_set_owner_r(skb, newsk);
+                                sctp_skb_set_owner_r_frag(skb, newsk);
                         }
                 }
 
@@ -5738,6 +5774,16 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 
         }
 
+        sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) {
+                sctp_sock_rfree_frag(skb);
+                sctp_skb_set_owner_r_frag(skb, newsk);
+        }
+
+        sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) {
+                sctp_sock_rfree_frag(skb);
+                sctp_skb_set_owner_r_frag(skb, newsk);
+        }
+
         /* Set the type of socket to indicate that it is peeled off from the
          * original UDP-style socket or created with the accept() call on a
          * TCP-style socket..