diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index b036710ba52cf4684936397aeb706bd6a3e2b039..42cd687e6608145be1dcbbe030162e73e57b7a52 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -737,10 +737,10 @@ static inline
 struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
			      u16 wqe_counter, u32 cqe_bcnt)
 {
-	struct bpf_prog *xdp_prog = READ_ONCE(rq->xdp_prog);
 	struct mlx5e_dma_info *di;
 	struct sk_buff *skb;
 	void *va, *data;
+	bool consumed;
 
 	di = &rq->dma_info[wqe_counter];
 	va = page_address(di->page);
@@ -759,7 +759,11 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 		return NULL;
 	}
 
-	if (mlx5e_xdp_handle(rq, xdp_prog, di, data, cqe_bcnt))
+	rcu_read_lock();
+	consumed = mlx5e_xdp_handle(rq, READ_ONCE(rq->xdp_prog), di, data,
+				    cqe_bcnt);
+	rcu_read_unlock();
+	if (consumed)
 		return NULL; /* page/packet was consumed by XDP */
 
 	skb = build_skb(va, RQ_PAGE_SIZE(rq));
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 876ab3a92ad532c347afdd2baf9f017f0b9fd160..00d9a03be31df518b986eb5a14c1fb6377c1e1f8 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1518,7 +1518,7 @@ static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len)
 	xdp.data = data;
 	xdp.data_end = data + len;
 
-	return BPF_PROG_RUN(prog, &xdp);
+	return bpf_prog_run_xdp(prog, &xdp);
 }
 
 /**
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 172ff6da92ad1f37fbf06aa0e0cee35031a2c8e5..faeaa9f3b197b58422f4d0308230f95e328ea0fb 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1497,7 +1497,14 @@ static bool qede_rx_xdp(struct qede_dev *edev,
 
 	xdp.data = page_address(bd->data) + cqe->placement_offset;
 	xdp.data_end = xdp.data + len;
+
+	/* Queues always have a full reset currently, so for the time
+	 * being until there's atomic program replace just mark read
+	 * side for map helpers.
+	 */
+	rcu_read_lock();
 	act = bpf_prog_run_xdp(prog, &xdp);
+	rcu_read_unlock();
 
 	if (act == XDP_PASS)
 		return true;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 7ba644626553274120d9b34467a931ab6c4bf051..97338134398f678bb96515df98960325fc09f85e 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -498,16 +498,16 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
 	return BPF_PROG_RUN(prog, skb);
 }
 
-static inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
-				   struct xdp_buff *xdp)
+static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
+					    struct xdp_buff *xdp)
 {
-	u32 ret;
-
-	rcu_read_lock();
-	ret = BPF_PROG_RUN(prog, xdp);
-	rcu_read_unlock();
-
-	return ret;
+	/* Caller needs to hold rcu_read_lock() (!), otherwise program
+	 * can be released while still running, or map elements could be
+	 * freed early while still having concurrent users. XDP fastpath
+	 * already takes rcu_read_lock() when fetching the program, so
+	 * it's not necessary here anymore.
+	 */
+	return BPF_PROG_RUN(prog, xdp);
}
 
 static inline unsigned int bpf_prog_size(unsigned int proglen)
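Note (not part of the patch): with rcu_read_lock() dropped from bpf_prog_run_xdp(), every driver call site needs to follow roughly the shape below. This is a minimal sketch of the caller-side contract the filter.h comment describes; the mydrv_* names and the per-queue program pointer are assumptions for illustration, not taken from any real driver, and the xdp_buff setup mirrors the data/data_end-only layout used by the drivers above.

/* Hypothetical driver RX path: fetch the XDP program and run it
 * inside a single RCU read-side section, since bpf_prog_run_xdp()
 * no longer takes rcu_read_lock() internally.
 */
#include <linux/filter.h>
#include <linux/rcupdate.h>

struct mydrv_rx_queue {
	struct bpf_prog *xdp_prog;	/* swapped by the writer side */
};

static bool mydrv_rx_xdp(struct mydrv_rx_queue *rxq, void *data,
			 unsigned int len)
{
	struct bpf_prog *prog;
	struct xdp_buff xdp;
	u32 act = XDP_PASS;

	xdp.data = data;
	xdp.data_end = data + len;

	rcu_read_lock();
	prog = READ_ONCE(rxq->xdp_prog);
	if (prog)
		act = bpf_prog_run_xdp(prog, &xdp);	/* caller holds RCU */
	rcu_read_unlock();

	return act == XDP_PASS;
}

The point of the pattern is that both the program pointer fetch (READ_ONCE()) and the program run sit inside the same RCU read-side section, so a program replace on the writer side can rely on RCU grace periods before releasing the old program or its map elements.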