Commit a6686f2f authored by Shirley Ma, committed by David S. Miller

skbuff: skb supports zero-copy buffers

This patch adds support for userspace buffers in the skb shared info. A new
struct ubuf_info is needed to maintain the userspace buffer argument and
index, and a callback is used to notify userspace to release the buffers
once the lower device has finished the DMA (i.e. the last reference to that
skb is gone).

If anything else takes a reference to these userspace buffers (for example
when the skb is cloned, copied, or its head expanded), the buffers are first
copied into kernel pages and released back to userspace via the callback.
This prevents userspace buffers from being held for too long.

destructor_arg is used to point to the userspace buffer info; a new tx_flags
bit, SKBTX_DEV_ZEROCOPY, is added for the zero-copy buffer check.
Signed-off-by: Shirley Ma <xma@...ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 1cdebb42
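The intended usage is that a lower-layer producer (a vhost-net/macvtap-style driver) pins userspace pages into the skb frags, attaches a struct ubuf_info through destructor_arg, and sets SKBTX_DEV_ZEROCOPY. The following minimal sketch is not part of this commit: notify_userspace() and attach_zerocopy() are hypothetical names; only ubuf_info, destructor_arg, and SKBTX_DEV_ZEROCOPY come from this patch.

#include <linux/skbuff.h>
#include <linux/slab.h>

/* Hypothetical helper telling userspace that buffer 'desc' may be reused. */
extern void notify_userspace(void *ctx, unsigned long desc);

/*
 * Completion callback: skb_release_data() invokes uarg->callback(uarg),
 * so the void * argument is the ubuf_info itself, not uarg->arg.
 */
static void zerocopy_done(void *arg)
{
	struct ubuf_info *uarg = arg;

	notify_userspace(uarg->arg, uarg->desc);
	kfree(uarg);
}

/* Mark an skb whose frags already point at pinned userspace pages. */
static int attach_zerocopy(struct sk_buff *skb, void *ctx, unsigned long idx)
{
	struct ubuf_info *uarg = kmalloc(sizeof(*uarg), GFP_KERNEL);

	if (!uarg)
		return -ENOMEM;
	uarg->callback = zerocopy_done;
	uarg->arg = ctx;	/* driver-private completion context */
	uarg->desc = idx;	/* index of the in-flight userspace buffer */

	skb_shinfo(skb)->destructor_arg = uarg;
	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
	return 0;
}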
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -187,6 +187,20 @@ enum {
 
 	/* ensure the originating sk reference is available on driver level */
 	SKBTX_DRV_NEEDS_SK_REF = 1 << 3,
+
+	/* device driver supports TX zero-copy buffers */
+	SKBTX_DEV_ZEROCOPY = 1 << 4,
+};
+
+/*
+ * The callback notifies userspace to release buffers when skb DMA is done in
+ * lower device, the skb last reference should be 0 when calling this.
+ * The desc is used to track userspace buffer index.
+ */
+struct ubuf_info {
+	void		(*callback)(void *);
+	void		*arg;
+	unsigned long	desc;
 };
 
 /* This data is invariant across clones and lives at
@@ -211,6 +225,7 @@ struct skb_shared_info {
 	/* Intermediate layers must ensure that destructor_arg
 	 * remains valid until skb destructor */
 	void		*destructor_arg;
+
 	/* must be last field, see pskb_expand_head() */
 	skb_frag_t	frags[MAX_SKB_FRAGS];
 };
@@ -2265,5 +2280,6 @@ static inline void skb_checksum_none_assert(struct sk_buff *skb)
 }
 
 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SKBUFF_H */
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -329,6 +329,18 @@ static void skb_release_data(struct sk_buff *skb)
 			put_page(skb_shinfo(skb)->frags[i].page);
 		}
 
+		/*
+		 * If skb buf is from userspace, we need to notify the caller
+		 * the lower device DMA has done;
+		 */
+		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+			struct ubuf_info *uarg;
+
+			uarg = skb_shinfo(skb)->destructor_arg;
+			if (uarg->callback)
+				uarg->callback(uarg);
+		}
+
 		if (skb_has_frag_list(skb))
 			skb_drop_fraglist(skb);
 
@@ -481,6 +493,9 @@ bool skb_recycle_check(struct sk_buff *skb, int skb_size)
 	if (irqs_disabled())
 		return false;
 
+	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
+		return false;
+
 	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
 		return false;
 
@@ -596,6 +611,51 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
 }
 EXPORT_SYMBOL_GPL(skb_morph);
 
+/* skb frags copy userspace buffers to kernel */
+static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
+{
+	int i;
+	int num_frags = skb_shinfo(skb)->nr_frags;
+	struct page *page, *head = NULL;
+	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;
+
+	for (i = 0; i < num_frags; i++) {
+		u8 *vaddr;
+		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+
+		page = alloc_page(GFP_ATOMIC);
+		if (!page) {
+			while (head) {
+				struct page *next = (struct page *)head->private;
+				put_page(head);
+				head = next;
+			}
+			return -ENOMEM;
+		}
+		vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
+		memcpy(page_address(page),
+		       vaddr + f->page_offset, f->size);
+		kunmap_skb_frag(vaddr);
+		page->private = (unsigned long)head;
+		head = page;
+	}
+
+	/* skb frags release userspace buffers */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+		put_page(skb_shinfo(skb)->frags[i].page);
+
+	uarg->callback(uarg);
+
+	/* skb frags point to kernel buffers */
+	for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
+		skb_shinfo(skb)->frags[i - 1].page_offset = 0;
+		skb_shinfo(skb)->frags[i - 1].page = head;
+		head = (struct page *)head->private;
+	}
+	return 0;
+}
+
 /**
  *	skb_clone	-	duplicate an sk_buff
  *	@skb: buffer to clone
@@ -614,6 +674,11 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 {
 	struct sk_buff *n;
 
+	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+		if (skb_copy_ubufs(skb, gfp_mask))
+			return NULL;
+	}
+
 	n = skb + 1;
 	if (skb->fclone == SKB_FCLONE_ORIG &&
 	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
@@ -731,6 +796,12 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 	if (skb_shinfo(skb)->nr_frags) {
 		int i;
 
+		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+			if (skb_copy_ubufs(skb, gfp_mask)) {
+				kfree(n);
+				goto out;
+			}
+		}
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
 			get_page(skb_shinfo(n)->frags[i].page);
@@ -788,7 +859,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 		fastpath = true;
 	else {
 		int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
-
 		fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
 	}
 
@@ -819,6 +889,11 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	if (fastpath) {
 		kfree(skb->head);
 	} else {
+		/* copy this zero copy skb frags */
+		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+			if (skb_copy_ubufs(skb, gfp_mask))
+				goto nofrags;
+		}
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 			get_page(skb_shinfo(skb)->frags[i].page);
 
@@ -853,6 +928,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	atomic_set(&skb_shinfo(skb)->dataref, 1);
 	return 0;
 
+nofrags:
+	kfree(data);
 nodata:
 	return -ENOMEM;
 }
@@ -1354,6 +1431,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 		}
 		start = end;
 	}
+
 	if (!len)
 		return 0;
 
......
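skb_copy_ubufs() above unwinds allocation failures by threading the freshly allocated pages through page->private, a LIFO list that needs no extra memory. The standalone sketch below is not from the commit (alloc_page_chain() is an illustrative name); it shows the same pattern. Because the list comes back in reverse allocation order, the final loop in skb_copy_ubufs() walks the frags from nr_frags down to 1 when re-pointing them at the kernel copies.

#include <linux/gfp.h>
#include <linux/mm.h>

/* Illustrative only: allocate n pages, chained through page->private. */
static struct page *alloc_page_chain(int n)
{
	struct page *page, *head = NULL;
	int i;

	for (i = 0; i < n; i++) {
		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			/* unwind everything built so far; no extra state needed */
			while (head) {
				struct page *next = (struct page *)head->private;
				put_page(head);
				head = next;
			}
			return NULL;
		}
		page->private = (unsigned long)head;	/* push onto the list */
		head = page;
	}
	return head;	/* head is the page allocated last */
}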