/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>

#define RX_BATCH_SIZE 16
#define LAZY_UPDATE_THRESHOLD 128

struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[0] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[0] ____cacheline_aligned_in_smp;
};

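/* Kernel-side bookkeeping for one ring. prod_head runs ahead of
 * prod_tail for entries that have been written or reserved but not yet
 * published to the shared ring; cons_head marks the end of the batch
 * known to be available, while cons_tail is the next entry to consume.
 * ring_mask is nentries - 1 (ring sizes are powers of two), chunk_mask
 * rounds a UMEM address down to the start of its chunk, and
 * invalid_descs counts descriptors rejected by validation.
 */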
struct xsk_queue {
	u64 chunk_mask;
	u64 size;
	u32 ring_mask;
	u32 nentries;
	u32 prod_head;
	u32 prod_tail;
	u32 cons_head;
	u32 cons_tail;
	struct xdp_ring *ring;
	u64 invalid_descs;
};

/* The structure of the shared state of the rings is the same as the
 * ring buffer in kernel/events/ring_buffer.c. For the Rx and completion
 * ring, the kernel is the producer and user space is the consumer. For
 * the Tx and fill rings, the kernel is the consumer and user space is
 * the producer.
 *
 * producer                         consumer
 *
 * if (LOAD ->consumer) {           LOAD ->producer
 *                    (A)           smp_rmb()       (C)
 *    STORE $data                   LOAD $data
 *    smp_wmb()       (B)           smp_mb()        (D)
 *    STORE ->producer              STORE ->consumer
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B), it ensures the data is written before the
 * producer pointer is updated. If this barrier were missing, the consumer
 * could observe the producer pointer being set and thus load the data
 * before the producer has written the new data. The consumer would in
 * this case load the old data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer actually has been read. If we do not have this
 * barrier, some architectures could load old data as speculative loads
 * are not discarded as the CPU does not know there is a dependency
 * between ->producer and data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. In case ->consumer indicates there is no
 * room in the buffer, we simply do not store $data, so no explicit
 * barrier is needed.
 *
 * (D) keeps the load of the data from being observed to happen after
 * the store of the consumer pointer. If we did not have this memory
 * barrier, the producer could observe the consumer pointer being set
 * and overwrite the data with a new value before the consumer got the
 * chance to read the old value. The consumer would thus miss reading
 * the old entry and very likely read the new entry twice, once right
 * now and again after circling through the ring.
 */
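
/* Purely illustrative sketch, not used anywhere in the stack: a
 * hypothetical single-producer/single-consumer hand-off over a
 * struct xdp_umem_ring that spells out where (A)-(D) above sit.
 * The xskq_example_*() names and the mask argument (nentries - 1)
 * exist only for this example.
 */
static inline bool xskq_example_produce(struct xdp_umem_ring *ring,
					u32 mask, u64 addr)
{
	u32 prod = ring->ptrs.producer;

	/* (A) control dependency: only store when the consumer left room */
	if (prod - READ_ONCE(ring->ptrs.consumer) > mask)
		return false;

	ring->desc[prod & mask] = addr;		/* STORE $data */
	smp_wmb();				/* (B): data before producer */
	WRITE_ONCE(ring->ptrs.producer, prod + 1); /* STORE ->producer */
	return true;
}

static inline bool xskq_example_consume(struct xdp_umem_ring *ring,
					u32 mask, u64 *addr)
{
	u32 cons = ring->ptrs.consumer;

	if (READ_ONCE(ring->ptrs.producer) == cons) /* LOAD ->producer */
		return false;

	smp_rmb();				/* (C): producer before data */
	*addr = ring->desc[cons & mask];	/* LOAD $data */
	smp_mb();				/* (D): data before consumer */
	WRITE_ONCE(ring->ptrs.consumer, cons + 1); /* STORE ->consumer */
	return true;
}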

/* Common functions operating on both RXTX and umem queues */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

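/* Return min(entries available to the kernel side, dcnt). The cached
 * copy of the shared producer pointer is only refreshed when the
 * cached view says the ring is empty.
 */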
static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
{
	u32 entries = q->prod_tail - q->cons_tail;

	if (entries == 0) {
		/* Refresh the local pointer */
		q->prod_tail = READ_ONCE(q->ring->producer);
		entries = q->prod_tail - q->cons_tail;
	}

	return (entries > dcnt) ? dcnt : entries;
}

static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
{
	u32 free_entries = q->nentries - (producer - q->cons_tail);

	if (free_entries >= dcnt)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cons_tail = READ_ONCE(q->ring->consumer);
	return q->nentries - (producer - q->cons_tail);
}

static inline bool xskq_has_addrs(struct xsk_queue *q, u32 cnt)
{
	u32 entries = q->prod_tail - q->cons_tail;

	if (entries >= cnt)
		return true;

	/* Refresh the local pointer. */
	q->prod_tail = READ_ONCE(q->ring->producer);
	entries = q->prod_tail - q->cons_tail;

	return entries >= cnt;
}

/* UMEM queue */

static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr)
{
	if (addr >= q->size) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		*addr = READ_ONCE(ring->desc[idx]) & q->chunk_mask;
		if (xskq_is_valid_addr(q, *addr))
			return addr;

		q->cons_tail++;
	}

	return NULL;
}

static inline u64 *xskq_peek_addr(struct xsk_queue *q, u64 *addr)
{
	if (q->cons_tail == q->cons_head) {
		smp_mb(); /* D, matches A */
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb(); /* C, matches B */
	}

	return xskq_validate_addr(q, addr);
}

static inline void xskq_discard_addr(struct xsk_queue *q)
{
	q->cons_tail++;
}

static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_nb_free(q, q->prod_tail, 1) == 0)
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->prod_tail++ & q->ring_mask] = addr;

	/* Order producer and data */
	smp_wmb(); /* B, matches C */

	WRITE_ONCE(q->ring->producer, q->prod_tail);
	return 0;
}

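/* Queue an address without publishing it: the entry is written and
 * prod_head advanced, but the consumer sees nothing until a later
 * xskq_produce_flush_addr_n() pushes out the whole batch.
 */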
static inline int xskq_produce_addr_lazy(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_nb_free(q, q->prod_head, LAZY_UPDATE_THRESHOLD) == 0)
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->prod_head++ & q->ring_mask] = addr;
	return 0;
}

static inline void xskq_produce_flush_addr_n(struct xsk_queue *q,
					     u32 nb_entries)
{
	/* Order producer and data */
	smp_wmb(); /* B, matches C */

	q->prod_tail += nb_entries;
	WRITE_ONCE(q->ring->producer, q->prod_tail);
}

static inline int xskq_reserve_addr(struct xsk_queue *q)
{
	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	/* A, matches D */
	q->prod_head++;
	return 0;
}

/* Rx/Tx queue */

static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
{
	if (!xskq_is_valid_addr(q, d->addr))
		return false;

	if (((d->addr + d->len) & q->chunk_mask) != (d->addr & q->chunk_mask) ||
	    d->options) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
						  struct xdp_desc *desc)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		*desc = READ_ONCE(ring->desc[idx]);
		if (xskq_is_valid_desc(q, desc))
			return desc;

		q->cons_tail++;
	}

	return NULL;
}

static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
					      struct xdp_desc *desc)
{
	if (q->cons_tail == q->cons_head) {
		smp_mb(); /* D, matches A */
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb(); /* C, matches B */
	}

	return xskq_validate_desc(q, desc);
}

static inline void xskq_discard_desc(struct xsk_queue *q)
{
	q->cons_tail++;
}
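
/* Hypothetical usage sketch, not part of the real interface: drain up
 * to @budget descriptors with the peek/discard helpers above and hand
 * each one to a caller-supplied callback. The shared consumer pointer
 * is only published lazily, by the next xskq_peek_desc() call that
 * finds the cached batch exhausted.
 */
static inline u32 xskq_example_drain(struct xsk_queue *q, u32 budget,
				     void (*handle)(u64 addr, u32 len))
{
	struct xdp_desc desc;
	u32 done = 0;

	while (done < budget && xskq_peek_desc(q, &desc)) {
		handle(desc.addr, desc.len);
		xskq_discard_desc(q);
		done++;
	}

	return done;
}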

static inline int xskq_produce_batch_desc(struct xsk_queue *q,
					  u64 addr, u32 len)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	unsigned int idx;

	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	/* A, matches D */
	idx = (q->prod_head++) & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;

	return 0;
}

static inline void xskq_produce_flush_desc(struct xsk_queue *q)
{
	/* Order producer and data */
	smp_wmb(); /* B, matches C */

	q->prod_tail = q->prod_head;
	WRITE_ONCE(q->ring->producer, q->prod_tail);
}

static inline bool xskq_full_desc(struct xsk_queue *q)
{
	return xskq_nb_avail(q, q->nentries) == q->nentries;
}

static inline bool xskq_empty_desc(struct xsk_queue *q)
{
	return xskq_nb_free(q, q->prod_tail, q->nentries) == q->nentries;
}

void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

/* Executed by the core when the entire UMEM gets freed */
void xsk_reuseq_destroy(struct xdp_umem *umem);

#endif /* _LINUX_XSK_QUEUE_H */