commit 1bf9116d authored by Jesper Dangaard Brouer, committed by Daniel Borkmann

xdp: fix bug in devmap teardown code path

Like the cpumap teardown, the devmap teardown code also flushes remaining
xdp_frames, via bq_xmit_all(), in case a map entry is removed.  The code
can call xdp_return_frame_rx_napi() from the wrong context, in case
ndo_xdp_xmit() fails.
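
The problem: xdp_return_frame_rx_napi() may only be used while running
under NAPI protection (the XDP RX path), because it takes the direct
per-NAPI recycle shortcut; the generic xdp_return_frame() is safe from
any context.  The fix threads an in_napi_ctx flag from each caller into
bq_xmit_all(), so the ndo_xdp_xmit() error path can pick the matching
helper.  A minimal sketch of that pattern, using a hypothetical
bq_drop_frames() helper rather than the full kernel function:

	/* Sketch only: bq_drop_frames() is a hypothetical helper that
	 * condenses the fixed error path of bq_xmit_all().
	 */
	static void bq_drop_frames(struct xdp_bulk_queue *bq, bool in_napi_ctx)
	{
		int i;

		for (i = 0; i < bq->count; i++) {
			struct xdp_frame *xdpf = bq->q[i];

			if (likely(in_napi_ctx))
				/* RX path under NAPI: fast per-NAPI recycling */
				xdp_return_frame_rx_napi(xdpf);
			else
				/* e.g. map teardown: generic, context-safe return */
				xdp_return_frame(xdpf);
		}
		bq->count = 0;
	}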

Fixes: 389ab7f0 ("xdp: introduce xdp_return_frame_rx_napi")
Fixes: 735fc405 ("xdp: change ndo_xdp_xmit API to support bulking")
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent 37d7ff25
@@ -217,7 +217,8 @@ void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
 }
 
 static int bq_xmit_all(struct bpf_dtab_netdev *obj,
-		       struct xdp_bulk_queue *bq, u32 flags)
+		       struct xdp_bulk_queue *bq, u32 flags,
+		       bool in_napi_ctx)
 {
 	struct net_device *dev = obj->dev;
 	int sent = 0, drops = 0, err = 0;
@@ -254,7 +255,10 @@ static int bq_xmit_all(struct bpf_dtab_netdev *obj,
 		struct xdp_frame *xdpf = bq->q[i];
 
 		/* RX path under NAPI protection, can return frames faster */
-		xdp_return_frame_rx_napi(xdpf);
+		if (likely(in_napi_ctx))
+			xdp_return_frame_rx_napi(xdpf);
+		else
+			xdp_return_frame(xdpf);
 		drops++;
 	}
 	goto out;
@@ -286,7 +290,7 @@ void __dev_map_flush(struct bpf_map *map)
 		__clear_bit(bit, bitmap);
 
 		bq = this_cpu_ptr(dev->bulkq);
-		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
 	}
 }
 
@@ -316,7 +320,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
 	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
 
 	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
-		bq_xmit_all(obj, bq, 0);
+		bq_xmit_all(obj, bq, 0, true);
 
 	/* Ingress dev_rx will be the same for all xdp_frame's in
 	 * bulk_queue, because bq stored per-CPU and must be flushed
@@ -385,7 +389,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
 		__clear_bit(dev->bit, bitmap);
 
 		bq = per_cpu_ptr(dev->bulkq, cpu);
-		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
 	}
 }
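
For context on the in_napi_ctx values chosen above: __dev_map_flush()
and bq_enqueue() run on the XDP receive path under NAPI protection and
therefore pass true, while dev_map_flush_old() runs from the devmap
entry teardown path and passes false.  A hedged sketch of that teardown
call chain, assuming the 4.18-era kernel/bpf/devmap.c layout:

	/* Assumed teardown call chain (not part of this diff):
	 *
	 *   dev_map_delete_elem()
	 *     -> call_rcu(..., __dev_map_entry_free)
	 *        __dev_map_entry_free()          // RCU callback context
	 *          -> dev_map_flush_old(dev)
	 *             -> bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false)
	 *
	 * RCU callbacks do not run under NAPI protection, so the error
	 * path must use xdp_return_frame() here.
	 */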