Commit 2e4e4410 authored by Eric Dumazet, committed by David S. Miller

net: add alloc_skb_with_frags() helper

Extract the code that builds an skb with frags out of sock_alloc_send_pskb(),
so that we can reuse it in other contexts.

The intent is to use it from tcp_send_rcvq(), tcp_collapse(), ...

We also want to replace some skb_linearize() calls with a more reliable
strategy in pathological cases where we need to reduce the number of frags.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent cb93471a
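For context, here is a minimal sketch of how a caller could use the new helper; it is not part of this commit, and the function name, the chosen page order and the pr_debug() message are hypothetical. It only illustrates the calling convention: header bytes go into the linear area, data_len is spread over page frags of at most the requested order, and the reason for failure is reported through errcode.

/* Hypothetical caller -- illustration only, not part of this commit.
 * Assumes <linux/skbuff.h> (which declares alloc_skb_with_frags() after
 * this change) is included.
 */
static struct sk_buff *build_paged_skb(unsigned long header_len,
				       unsigned long data_len,
				       gfp_t gfp)
{
	int errcode;
	struct sk_buff *skb;

	/* Ask for up to order-3 (32KB) frags; the helper falls back to
	 * smaller pages on its own when high-order allocation fails.
	 */
	skb = alloc_skb_with_frags(header_len, data_len, 3,
				   &errcode, gfp);
	if (!skb)
		pr_debug("paged skb allocation failed: %d\n", errcode);
	return skb;
}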
include/linux/skbuff.h
@@ -769,6 +769,12 @@ static inline struct sk_buff *alloc_skb(unsigned int size,
 	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
 }
 
+struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
+				     unsigned long data_len,
+				     int max_page_order,
+				     int *errcode,
+				     gfp_t gfp_mask);
+
 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 					       gfp_t priority)
 {
...
net/core/skbuff.c
@@ -4102,3 +4102,81 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
 	return NULL;
 }
 EXPORT_SYMBOL(skb_vlan_untag);
+
+/**
+ * alloc_skb_with_frags - allocate skb with page frags
+ *
+ * header_len: size of linear part
+ * data_len: needed length in frags
+ * max_page_order: max page order desired.
+ * errcode: pointer to error code if any
+ * gfp_mask: allocation mask
+ *
+ * This can be used to allocate a paged skb, given a maximal order for frags.
+ */
+struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
+				     unsigned long data_len,
+				     int max_page_order,
+				     int *errcode,
+				     gfp_t gfp_mask)
+{
+	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+	unsigned long chunk;
+	struct sk_buff *skb;
+	struct page *page;
+	gfp_t gfp_head;
+	int i;
+
+	*errcode = -EMSGSIZE;
+	/* Note this test could be relaxed, if we succeed to allocate
+	 * high order pages...
+	 */
+	if (npages > MAX_SKB_FRAGS)
+		return NULL;
+
+	gfp_head = gfp_mask;
+	if (gfp_head & __GFP_WAIT)
+		gfp_head |= __GFP_REPEAT;
+
+	*errcode = -ENOBUFS;
+	skb = alloc_skb(header_len, gfp_head);
+	if (!skb)
+		return NULL;
+
+	skb->truesize += npages << PAGE_SHIFT;
+
+	for (i = 0; npages > 0; i++) {
+		int order = max_page_order;
+
+		while (order) {
+			if (npages >= 1 << order) {
+				page = alloc_pages(gfp_mask |
+						   __GFP_COMP |
+						   __GFP_NOWARN |
+						   __GFP_NORETRY,
+						   order);
+				if (page)
+					goto fill_page;
+				/* Do not retry other high order allocations */
+				order = 1;
+				max_page_order = 0;
+			}
+			order--;
+		}
+		page = alloc_page(gfp_mask);
+		if (!page)
+			goto failure;
+fill_page:
+		chunk = min_t(unsigned long, data_len,
+			      PAGE_SIZE << order);
+		skb_fill_page_desc(skb, i, page, 0, chunk);
+		data_len -= chunk;
+		npages -= 1 << order;
+	}
+	return skb;
+
+failure:
+	kfree_skb(skb);
+	return NULL;
+}
+EXPORT_SYMBOL(alloc_skb_with_frags);
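To make the chunking behaviour concrete, here is a standalone userspace sketch that mirrors the loop above and prints the frag layout the helper would build. It is not kernel code: PAGE_SIZE is assumed to be 4096, the data_len and max_page_order values are hypothetical, and it only models the "too few pages left for this order" fallback, not allocation failure.

/* Standalone illustration, not kernel code: mirrors the chunking logic of
 * alloc_skb_with_frags() under the assumptions stated above.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long data_len = 70000;	/* hypothetical payload size */
	int max_page_order = 3;		/* allow up to 32KB compound pages */
	int npages = (data_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int i;

	for (i = 0; npages > 0; i++) {
		int order = max_page_order;
		unsigned long chunk;

		/* Drop to a smaller order when too few pages remain. */
		while (order && npages < (1 << order))
			order--;

		chunk = data_len < (PAGE_SIZE << order) ?
			data_len : (PAGE_SIZE << order);
		printf("frag %d: order %d, chunk %lu bytes\n", i, order, chunk);
		data_len -= chunk;
		npages -= 1 << order;
	}
	return 0;
}

With data_len = 70000 this prints two order-3 frags of 32768 bytes and one order-1 frag of 4464 bytes, i.e. three frags instead of the eighteen order-0 pages the old per-page loop would have needed.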
net/core/sock.c
@@ -1762,21 +1762,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 				     unsigned long data_len, int noblock,
 				     int *errcode, int max_page_order)
 {
-	struct sk_buff *skb = NULL;
-	unsigned long chunk;
-	gfp_t gfp_mask;
+	struct sk_buff *skb;
 	long timeo;
 	int err;
-	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-	struct page *page;
-	int i;
-
-	err = -EMSGSIZE;
-	if (npages > MAX_SKB_FRAGS)
-		goto failure;
 
 	timeo = sock_sndtimeo(sk, noblock);
-	while (!skb) {
+	for (;;) {
 		err = sock_error(sk);
 		if (err != 0)
 			goto failure;
@@ -1785,66 +1776,27 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 		if (sk->sk_shutdown & SEND_SHUTDOWN)
 			goto failure;
 
-		if (atomic_read(&sk->sk_wmem_alloc) >= sk->sk_sndbuf) {
-			set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
-			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-			err = -EAGAIN;
-			if (!timeo)
-				goto failure;
-			if (signal_pending(current))
-				goto interrupted;
-			timeo = sock_wait_for_wmem(sk, timeo);
-			continue;
-		}
-
-		err = -ENOBUFS;
-		gfp_mask = sk->sk_allocation;
-		if (gfp_mask & __GFP_WAIT)
-			gfp_mask |= __GFP_REPEAT;
-
-		skb = alloc_skb(header_len, gfp_mask);
-		if (!skb)
-			goto failure;
-
-		skb->truesize += data_len;
-
-		for (i = 0; npages > 0; i++) {
-			int order = max_page_order;
-
-			while (order) {
-				if (npages >= 1 << order) {
-					page = alloc_pages(sk->sk_allocation |
-							   __GFP_COMP |
-							   __GFP_NOWARN |
-							   __GFP_NORETRY,
-							   order);
-					if (page)
-						goto fill_page;
-					/* Do not retry other high order allocations */
-					order = 1;
-					max_page_order = 0;
-				}
-				order--;
-			}
-			page = alloc_page(sk->sk_allocation);
-			if (!page)
-				goto failure;
-fill_page:
-			chunk = min_t(unsigned long, data_len,
-				      PAGE_SIZE << order);
-			skb_fill_page_desc(skb, i, page, 0, chunk);
-			data_len -= chunk;
-			npages -= 1 << order;
-		}
+		if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
+			break;
+
+		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+		err = -EAGAIN;
+		if (!timeo)
+			goto failure;
+		if (signal_pending(current))
+			goto interrupted;
+		timeo = sock_wait_for_wmem(sk, timeo);
 	}
-
-	skb_set_owner_w(skb, sk);
+	skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
+				   errcode, sk->sk_allocation);
+	if (skb)
+		skb_set_owner_w(skb, sk);
 	return skb;
 
 interrupted:
 	err = sock_intr_errno(timeo);
 failure:
-	kfree_skb(skb);
 	*errcode = err;
 	return NULL;
 }
...