提交 59ae9fc6 编写于 作者: Z Zoltan Kiss 提交者: David S. Miller

xen-netback: Fix handling of skbs requiring too many slots

A recent commit (a02eb4 "xen-netback: worse-case estimate in xenvif_rx_action is
underestimating") capped the slot estimation to MAX_SKB_FRAGS, but that triggers
the next BUG_ON a few lines down, as the packet consumes more slots than
estimated.
This patch introduces full_coalesce on the skb callback buffer, which is used in
start_new_rx_buffer() to decide whether netback needs coalescing more
aggressively. By doing that, no packet should need more than
(XEN_NETIF_MAX_TX_SIZE + 1) / PAGE_SIZE data slots (excluding the optional GSO
slot, it doesn't carry data, therefore irrelevant in this case), as the provided
buffers are fully utilized.
Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
Cc: Paul Durrant <paul.durrant@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Paul Durrant <paul.durrant@gmail.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
上级 c5316920
...@@ -163,7 +163,8 @@ bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed) ...@@ -163,7 +163,8 @@ bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
* adding 'size' bytes to a buffer which currently contains 'offset' * adding 'size' bytes to a buffer which currently contains 'offset'
* bytes. * bytes.
*/ */
static bool start_new_rx_buffer(int offset, unsigned long size, int head) static bool start_new_rx_buffer(int offset, unsigned long size, int head,
bool full_coalesce)
{ {
/* simple case: we have completely filled the current buffer. */ /* simple case: we have completely filled the current buffer. */
if (offset == MAX_BUFFER_OFFSET) if (offset == MAX_BUFFER_OFFSET)
...@@ -175,6 +176,7 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head) ...@@ -175,6 +176,7 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
* (i) this frag would fit completely in the next buffer * (i) this frag would fit completely in the next buffer
* and (ii) there is already some data in the current buffer * and (ii) there is already some data in the current buffer
* and (iii) this is not the head buffer. * and (iii) this is not the head buffer.
* and (iv) there is no need to fully utilize the buffers
* *
* Where: * Where:
* - (i) stops us splitting a frag into two copies * - (i) stops us splitting a frag into two copies
...@@ -185,6 +187,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head) ...@@ -185,6 +187,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
* by (ii) but is explicitly checked because * by (ii) but is explicitly checked because
* netfront relies on the first buffer being * netfront relies on the first buffer being
* non-empty and can crash otherwise. * non-empty and can crash otherwise.
* - (iv) is needed for skbs which can use up more than MAX_SKB_FRAGS
* slot
* *
* This means we will effectively linearise small * This means we will effectively linearise small
* frags but do not needlessly split large buffers * frags but do not needlessly split large buffers
...@@ -192,7 +196,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head) ...@@ -192,7 +196,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
* own buffers as before. * own buffers as before.
*/ */
BUG_ON(size > MAX_BUFFER_OFFSET); BUG_ON(size > MAX_BUFFER_OFFSET);
if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head) if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
!full_coalesce)
return true; return true;
return false; return false;
...@@ -227,6 +232,13 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif, ...@@ -227,6 +232,13 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
return meta; return meta;
} }
/* Per-skb state kept in skb->cb while netback builds guest RX responses. */
struct xenvif_rx_cb {
int meta_slots_used; /* number of meta slots this skb consumed */
bool full_coalesce; /* set when the slot estimate was capped at MAX_SKB_FRAGS; tells start_new_rx_buffer() to fill each buffer completely */
};
/* Accessor for the RX control-buffer area of an skb. */
#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
/* /*
* Set up the grant operations for this fragment. If it's a flipping * Set up the grant operations for this fragment. If it's a flipping
* interface, we also set up the unmap request from here. * interface, we also set up the unmap request from here.
...@@ -261,7 +273,10 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, ...@@ -261,7 +273,10 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
if (bytes > size) if (bytes > size)
bytes = size; bytes = size;
if (start_new_rx_buffer(npo->copy_off, bytes, *head)) { if (start_new_rx_buffer(npo->copy_off,
bytes,
*head,
XENVIF_RX_CB(skb)->full_coalesce)) {
/* /*
* Netfront requires there to be some data in the head * Netfront requires there to be some data in the head
* buffer. * buffer.
...@@ -541,12 +556,6 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status, ...@@ -541,12 +556,6 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status,
} }
} }
struct xenvif_rx_cb {
int meta_slots_used;
};
#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
void xenvif_kick_thread(struct xenvif *vif) void xenvif_kick_thread(struct xenvif *vif)
{ {
wake_up(&vif->wq); wake_up(&vif->wq);
...@@ -602,10 +611,15 @@ static void xenvif_rx_action(struct xenvif *vif) ...@@ -602,10 +611,15 @@ static void xenvif_rx_action(struct xenvif *vif)
/* To avoid the estimate becoming too pessimal for some /* To avoid the estimate becoming too pessimal for some
* frontends that limit posted rx requests, cap the estimate * frontends that limit posted rx requests, cap the estimate
* at MAX_SKB_FRAGS. * at MAX_SKB_FRAGS. In this case netback will fully coalesce
* the skb into the provided slots.
*/ */
if (max_slots_needed > MAX_SKB_FRAGS) if (max_slots_needed > MAX_SKB_FRAGS) {
max_slots_needed = MAX_SKB_FRAGS; max_slots_needed = MAX_SKB_FRAGS;
XENVIF_RX_CB(skb)->full_coalesce = true;
} else {
XENVIF_RX_CB(skb)->full_coalesce = false;
}
/* We may need one more slot for GSO metadata */ /* We may need one more slot for GSO metadata */
if (skb_is_gso(skb) && if (skb_is_gso(skb) &&
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册