Commit 7dbea3e8 authored by David S. Miller

Merge branch 'napi_page_frags'

Alexander Duyck says:

====================
net: Alloc NAPI page frags from their own pool

This patch series implements a means of allocating page fragments without
the need for the local_irq_save/restore in __netdev_alloc_frag.  By doing
this I am able to decrease packet processing time by 11ns per packet in my
test environment.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
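
For context, a minimal sketch (not taken from this series) of how a driver's copybreak path changes with the new API: netdev_alloc_skb_ip_align() ends up in __netdev_alloc_frag() and pays a local_irq_save/restore per small packet, while napi_alloc_skb() draws from the per-CPU NAPI cache and can skip that, because NAPI poll already runs in softirq context. The struct and field names here (my_priv, rx_copybreak_example) are made up for illustration; only the allocator calls themselves are real kernel APIs.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical private struct; real drivers embed napi_struct like this. */
struct my_priv {
	struct net_device *netdev;
	struct napi_struct napi;
};

/* Hypothetical copybreak helper called from the driver's NAPI poll loop. */
static struct sk_buff *rx_copybreak_example(struct my_priv *priv,
					    const void *data, unsigned int len)
{
	struct sk_buff *skb;

	/* Before: skb = netdev_alloc_skb_ip_align(priv->netdev, len);
	 * which reaches __netdev_alloc_frag() and disables/re-enables
	 * IRQs around every page-fragment allocation.
	 */

	/* After: allocate from the per-CPU NAPI fragment cache.  This is
	 * only safe from NAPI poll (softirq) context, which is exactly
	 * why no IRQ save/restore is needed.
	 */
	skb = napi_alloc_skb(&priv->napi, len);
	if (unlikely(!skb))
		return NULL;

	memcpy(skb_put(skb, len), data, len);
	return skb;
}
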
@@ -836,7 +836,7 @@ static int b44_rx(struct b44 *bp, int budget)
 			struct sk_buff *copy_skb;
 
 			b44_recycle_rx(bp, cons, bp->rx_prod);
-			copy_skb = netdev_alloc_skb_ip_align(bp->dev, len);
+			copy_skb = napi_alloc_skb(&bp->napi, len);
 			if (copy_skb == NULL)
 				goto drop_it_no_recycle;
@@ -385,7 +385,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
 		if (len < copybreak) {
 			struct sk_buff *nskb;
 
-			nskb = netdev_alloc_skb_ip_align(dev, len);
+			nskb = napi_alloc_skb(&priv->napi, len);
 			if (!nskb) {
 				/* forget packet, just rearm desc */
 				dev->stats.rx_dropped++;
@@ -1015,7 +1015,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 			 */
 			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
 			    (len <= RX_COPY_THRESH)) {
-				skb = netdev_alloc_skb_ip_align(bp->dev, len);
+				skb = napi_alloc_skb(&fp->napi, len);
 				if (skb == NULL) {
 					DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
 					   "ERROR packet dropped because of alloc failure\n");
@@ -1025,7 +1025,7 @@ MODULE_PARM_DESC(copybreak, "Receive copy threshold");
 
 /**
  *	get_packet - return the next ingress packet buffer
- *	@pdev: the PCI device that received the packet
+ *	@adapter: the adapter that received the packet
  *	@fl: the SGE free list holding the packet
  *	@len: the actual packet length, excluding any SGE padding
  *
@@ -1037,14 +1037,15 @@ MODULE_PARM_DESC(copybreak, "Receive copy threshold");
  *	threshold and the packet is too big to copy, or (b) the packet should
  *	be copied but there is no memory for the copy.
  */
-static inline struct sk_buff *get_packet(struct pci_dev *pdev,
+static inline struct sk_buff *get_packet(struct adapter *adapter,
 					 struct freelQ *fl, unsigned int len)
 {
-	struct sk_buff *skb;
 	const struct freelQ_ce *ce = &fl->centries[fl->cidx];
+	struct pci_dev *pdev = adapter->pdev;
+	struct sk_buff *skb;
 
 	if (len < copybreak) {
-		skb = netdev_alloc_skb_ip_align(NULL, len);
+		skb = napi_alloc_skb(&adapter->napi, len);
 		if (!skb)
 			goto use_orig_buf;
@@ -1357,7 +1358,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
 	struct sge_port_stats *st;
 	struct net_device *dev;
 
-	skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
+	skb = get_packet(adapter, fl, len - sge->rx_pkt_pad);
 	if (unlikely(!skb)) {
 		sge->stats.rx_drops++;
 		return;
@@ -4100,7 +4100,7 @@ static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
 static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
 					   unsigned int bufsz)
 {
-	struct sk_buff *skb = netdev_alloc_skb_ip_align(adapter->netdev, bufsz);
+	struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
 
 	if (unlikely(!skb))
 		adapter->alloc_rx_buff_failed++;
@@ -1016,7 +1016,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
 		 */
 		if (length < copybreak) {
 			struct sk_buff *new_skb =
-				netdev_alloc_skb_ip_align(netdev, length);
+				napi_alloc_skb(&adapter->napi, length);
 			if (new_skb) {
 				skb_copy_to_linear_data_offset(new_skb,
 							       -NET_IP_ALIGN,
@@ -308,8 +308,8 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
 #endif
 
 		/* allocate a skb to store the frags */
-		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+		skb = napi_alloc_skb(&rx_ring->q_vector->napi,
 				     FM10K_RX_HDR_LEN);
 		if (unlikely(!skb)) {
 			rx_ring->rx_stats.alloc_failed++;
 			return NULL;
@@ -6644,8 +6644,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
 #endif
 
 		/* allocate a skb to store the frags */
-		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-						IGB_RX_HDR_LEN);
+		skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
 		if (unlikely(!skb)) {
 			rx_ring->rx_stats.alloc_failed++;
 			return NULL;
@@ -1963,7 +1963,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
  * this should improve performance for small packets with large amounts
  * of reassembly being done in the stack
  */
-static void ixgb_check_copybreak(struct net_device *netdev,
+static void ixgb_check_copybreak(struct napi_struct *napi,
 				 struct ixgb_buffer *buffer_info,
 				 u32 length, struct sk_buff **skb)
 {
@@ -1972,7 +1972,7 @@ static void ixgb_check_copybreak(struct net_device *netdev,
 	if (length > copybreak)
 		return;
 
-	new_skb = netdev_alloc_skb_ip_align(netdev, length);
+	new_skb = napi_alloc_skb(napi, length);
 	if (!new_skb)
 		return;
@@ -2064,7 +2064,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
 			goto rxdesc_done;
 		}
 
-		ixgb_check_copybreak(netdev, buffer_info, length, &skb);
+		ixgb_check_copybreak(&adapter->napi, buffer_info, length, &skb);
 
 		/* Good Receive */
 		skb_put(skb, length);
@@ -1913,8 +1913,8 @@ static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
 #endif
 
 		/* allocate a skb to store the frags */
-		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+		skb = napi_alloc_skb(&rx_ring->q_vector->napi,
 						IXGBE_RX_HDR_SIZE);
 		if (unlikely(!skb)) {
 			rx_ring->rx_stats.alloc_rx_buff_failed++;
 			return NULL;
@@ -507,7 +507,7 @@ static int cp_rx_poll(struct napi_struct *napi, int budget)
 		netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
 			  rx_tail, status, len);
 
-		new_skb = netdev_alloc_skb_ip_align(dev, buflen);
+		new_skb = napi_alloc_skb(napi, buflen);
 		if (!new_skb) {
 			dev->stats.rx_dropped++;
 			goto rx_next;
@@ -2037,7 +2037,7 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
 		/* Malloc up new buffer, compatible with net-2e. */
 		/* Omit the four octet CRC from the length. */
 
-		skb = netdev_alloc_skb_ip_align(dev, pkt_size);
+		skb = napi_alloc_skb(&tp->napi, pkt_size);
 		if (likely(skb)) {
 #if RX_BUF_IDX == 3
 			wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
@@ -7260,7 +7260,7 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data,
 	data = rtl8169_align(data);
 	dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
 	prefetch(data);
-	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
+	skb = napi_alloc_skb(&tp->napi, pkt_size);
 	if (skb)
 		memcpy(skb->data, data, pkt_size);
 	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
@@ -151,6 +151,7 @@ struct net_device;
 struct scatterlist;
 struct pipe_inode_info;
 struct iov_iter;
+struct napi_struct;
 
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 struct nf_conntrack {
@@ -673,6 +674,7 @@ struct sk_buff {
 
 #define SKB_ALLOC_FCLONE	0x01
 #define SKB_ALLOC_RX		0x02
+#define SKB_ALLOC_NAPI		0x04
 
 /* Returns true if the skb was allocated from PFMEMALLOC reserves */
 static inline bool skb_pfmemalloc(const struct sk_buff *skb)
@@ -2164,6 +2166,15 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
 	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
 }
 
+void *napi_alloc_frag(unsigned int fragsz);
+struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
+				 unsigned int length, gfp_t gfp_mask);
+static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
+					     unsigned int length)
+{
+	return __napi_alloc_skb(napi, length, GFP_ATOMIC);
+}
+
 /**
  * __dev_alloc_pages - allocate page for network Rx
  * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
@@ -4172,7 +4172,7 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi)
 	struct sk_buff *skb = napi->skb;
 
 	if (!skb) {
-		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
+		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
 		napi->skb = skb;
 	}
 	return skb;
@@ -336,59 +336,85 @@ struct netdev_alloc_cache {
 	unsigned int		pagecnt_bias;
 };
 static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
+static DEFINE_PER_CPU(struct netdev_alloc_cache, napi_alloc_cache);
 
-static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
+				       gfp_t gfp_mask)
 {
-	struct netdev_alloc_cache *nc;
-	void *data = NULL;
-	int order;
-	unsigned long flags;
+	const unsigned int order = NETDEV_FRAG_PAGE_MAX_ORDER;
+	struct page *page = NULL;
+	gfp_t gfp = gfp_mask;
 
-	local_irq_save(flags);
-	nc = this_cpu_ptr(&netdev_alloc_cache);
-	if (unlikely(!nc->frag.page)) {
+	if (order) {
+		gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
+		page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
+		nc->frag.size = PAGE_SIZE << (page ? order : 0);
+	}
+
+	if (unlikely(!page))
+		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
+
+	nc->frag.page = page;
+
+	return page;
+}
+
+static void *__alloc_page_frag(struct netdev_alloc_cache __percpu *cache,
+			       unsigned int fragsz, gfp_t gfp_mask)
+{
+	struct netdev_alloc_cache *nc = this_cpu_ptr(cache);
+	struct page *page = nc->frag.page;
+	unsigned int size;
+	int offset;
+
+	if (unlikely(!page)) {
 refill:
-		for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
-			gfp_t gfp = gfp_mask;
+		page = __page_frag_refill(nc, gfp_mask);
+		if (!page)
+			return NULL;
+
+		/* if size can vary use frag.size else just use PAGE_SIZE */
+		size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;
 
-			if (order)
-				gfp |= __GFP_COMP | __GFP_NOWARN;
-			nc->frag.page = alloc_pages(gfp, order);
-			if (likely(nc->frag.page))
-				break;
-			if (--order < 0)
-				goto end;
-		}
-		nc->frag.size = PAGE_SIZE << order;
 		/* Even if we own the page, we do not use atomic_set().
 		 * This would break get_page_unless_zero() users.
 		 */
-		atomic_add(NETDEV_PAGECNT_MAX_BIAS - 1,
-			   &nc->frag.page->_count);
-		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
-		nc->frag.offset = 0;
+		atomic_add(size - 1, &page->_count);
+
+		/* reset page count bias and offset to start of new frag */
+		nc->pagecnt_bias = size;
+		nc->frag.offset = size;
 	}
 
-	if (nc->frag.offset + fragsz > nc->frag.size) {
-		if (atomic_read(&nc->frag.page->_count) != nc->pagecnt_bias) {
-			if (!atomic_sub_and_test(nc->pagecnt_bias,
-						 &nc->frag.page->_count))
-				goto refill;
-			/* OK, page count is 0, we can safely set it */
-			atomic_set(&nc->frag.page->_count,
-				   NETDEV_PAGECNT_MAX_BIAS);
-		} else {
-			atomic_add(NETDEV_PAGECNT_MAX_BIAS - nc->pagecnt_bias,
-				   &nc->frag.page->_count);
-		}
-		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
-		nc->frag.offset = 0;
+	offset = nc->frag.offset - fragsz;
+	if (unlikely(offset < 0)) {
+		if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
+			goto refill;
+
+		/* if size can vary use frag.size else just use PAGE_SIZE */
+		size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;
+
+		/* OK, page count is 0, we can safely set it */
+		atomic_set(&page->_count, size);
+
+		/* reset page count bias and offset to start of new frag */
+		nc->pagecnt_bias = size;
+		offset = size - fragsz;
 	}
 
-	data = page_address(nc->frag.page) + nc->frag.offset;
-	nc->frag.offset += fragsz;
 	nc->pagecnt_bias--;
-end:
+	nc->frag.offset = offset;
+
+	return page_address(page) + offset;
+}
+
+static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+{
+	unsigned long flags;
+	void *data;
+
+	local_irq_save(flags);
+	data = __alloc_page_frag(&netdev_alloc_cache, fragsz, gfp_mask);
 	local_irq_restore(flags);
 	return data;
 }
@@ -406,11 +432,25 @@ void *netdev_alloc_frag(unsigned int fragsz)
 }
 EXPORT_SYMBOL(netdev_alloc_frag);
 
+static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+{
+	return __alloc_page_frag(&napi_alloc_cache, fragsz, gfp_mask);
+}
+
+void *napi_alloc_frag(unsigned int fragsz)
+{
+	return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
+}
+EXPORT_SYMBOL(napi_alloc_frag);
+
 /**
- * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
- * @dev: network device to receive on
+ * __alloc_rx_skb - allocate an skbuff for rx
  * @length: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb
+ * @flags: If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
+ *	allocations in case we have to fallback to __alloc_skb()
+ *	If SKB_ALLOC_NAPI is set, page fragment will be allocated
+ *	from napi_cache instead of netdev_cache.
 *
 * Allocate a new &sk_buff and assign it a usage count of one. The
 * buffer has unspecified headroom built in. Users should allocate
@@ -419,11 +459,11 @@ EXPORT_SYMBOL(netdev_alloc_frag);
 *
 * %NULL is returned if there is no free memory.
 */
-struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
-				   unsigned int length, gfp_t gfp_mask)
+static struct sk_buff *__alloc_rx_skb(unsigned int length, gfp_t gfp_mask,
+				      int flags)
 {
 	struct sk_buff *skb = NULL;
-	unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
+	unsigned int fragsz = SKB_DATA_ALIGN(length) +
 			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 	if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
@@ -432,7 +472,9 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 		if (sk_memalloc_socks())
 			gfp_mask |= __GFP_MEMALLOC;
 
-		data = __netdev_alloc_frag(fragsz, gfp_mask);
+		data = (flags & SKB_ALLOC_NAPI) ?
+			__napi_alloc_frag(fragsz, gfp_mask) :
+			__netdev_alloc_frag(fragsz, gfp_mask);
 
 		if (likely(data)) {
 			skb = build_skb(data, fragsz);
@@ -440,17 +482,72 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 			put_page(virt_to_head_page(data));
 		}
 	} else {
-		skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask,
+		skb = __alloc_skb(length, gfp_mask,
 				  SKB_ALLOC_RX, NUMA_NO_NODE);
 	}
+	return skb;
+}
+
+/**
+ * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
+ * @dev: network device to receive on
+ * @length: length to allocate
+ * @gfp_mask: get_free_pages mask, passed to alloc_skb
+ *
+ * Allocate a new &sk_buff and assign it a usage count of one. The
+ * buffer has NET_SKB_PAD headroom built in. Users should allocate
+ * the headroom they think they need without accounting for the
+ * built in space. The built in space is used for optimisations.
+ *
+ * %NULL is returned if there is no free memory.
+ */
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
+				   unsigned int length, gfp_t gfp_mask)
+{
+	struct sk_buff *skb;
+
+	length += NET_SKB_PAD;
+	skb = __alloc_rx_skb(length, gfp_mask, 0);
+
 	if (likely(skb)) {
 		skb_reserve(skb, NET_SKB_PAD);
 		skb->dev = dev;
 	}
+
 	return skb;
 }
 EXPORT_SYMBOL(__netdev_alloc_skb);
+
+/**
+ * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
+ * @napi: napi instance this buffer was allocated for
+ * @length: length to allocate
+ * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
+ *
+ * Allocate a new sk_buff for use in NAPI receive.  This buffer will
+ * attempt to allocate the head from a special reserved region used
+ * only for NAPI Rx allocation.  By doing this we can save several
+ * CPU cycles by avoiding having to disable and re-enable IRQs.
+ *
+ * %NULL is returned if there is no free memory.
+ */
+struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
+				 unsigned int length, gfp_t gfp_mask)
+{
+	struct sk_buff *skb;
+
+	length += NET_SKB_PAD + NET_IP_ALIGN;
+	skb = __alloc_rx_skb(length, gfp_mask, SKB_ALLOC_NAPI);
+
+	if (likely(skb)) {
+		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+		skb->dev = napi->dev;
+	}
+
+	return skb;
+}
+EXPORT_SYMBOL(__napi_alloc_skb);
 
 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
 		     int size, unsigned int truesize)
 {
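
A note on the reworked __alloc_page_frag() above: the fragment offset now counts down from the end of the cached page toward 0 instead of counting up, so page exhaustion is detected with a single "offset < 0" test. A simplified userspace sketch of that countdown scheme is shown below (hypothetical names; no per-CPU state or page refcount biasing, and malloc() stands in for the page allocator):

#include <stdio.h>
#include <stdlib.h>

#define FAKE_PAGE_SIZE 4096

struct frag_cache {
	char *page;	/* backing buffer standing in for a real page */
	int offset;	/* next fragment ends here; counts down toward 0 */
};

static void *frag_alloc(struct frag_cache *nc, int fragsz)
{
	int offset = nc->offset - fragsz;

	if (offset < 0) {		/* current page exhausted: refill */
		free(nc->page);
		nc->page = malloc(FAKE_PAGE_SIZE);
		if (!nc->page)
			return NULL;
		offset = FAKE_PAGE_SIZE - fragsz;
	}

	nc->offset = offset;
	return nc->page + offset;
}

int main(void)
{
	struct frag_cache nc = { .page = NULL, .offset = 0 };

	printf("%p\n", frag_alloc(&nc, 256));	/* triggers a refill */
	printf("%p\n", frag_alloc(&nc, 256));	/* 256 bytes below the first */
	return 0;
}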