/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>

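/* Maximum number of entries pulled from the shared ring into the
 * locally cached consumer window in one go, see xskq_peek_addr() and
 * xskq_peek_desc().
 */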
#define RX_BATCH_SIZE 16

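/* Producer and consumer indices live on separate cache lines so that
 * the two sides of the ring (kernel and user space) do not false-share
 * when each updates its own index.
 */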
struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[0] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[0] ____cacheline_aligned_in_smp;
};

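/* Kernel-side state for one ring. prod_head/prod_tail and
 * cons_head/cons_tail are locally cached copies of the ring indices;
 * the shared ring->producer/consumer words are only accessed through
 * READ_ONCE()/WRITE_ONCE() when the cached window is exhausted or when
 * new entries are published, keeping cache-line traffic between the
 * two sides low. invalid_descs counts descriptors that failed
 * validation and were skipped (reported via xskq_nb_invalid_descs()).
 */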
struct xsk_queue {
	struct xdp_umem_props umem_props;
	u32 ring_mask;
	u32 nentries;
	u32 prod_head;
	u32 prod_tail;
	u32 cons_head;
	u32 cons_tail;
	struct xdp_ring *ring;
	u64 invalid_descs;
};

/* Common functions operating for both RXTX and umem queues */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

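/* Return how many entries are available for consumption, capped at
 * dcnt. The shared producer index is only re-read when the cached view
 * looks empty.
 */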
static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
{
	u32 entries = q->prod_tail - q->cons_tail;

	if (entries == 0) {
		/* Refresh the local pointer */
		q->prod_tail = READ_ONCE(q->ring->producer);
		entries = q->prod_tail - q->cons_tail;
	}

	return (entries > dcnt) ? dcnt : entries;
}

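/* Return how many entries can still be produced relative to
 * "producer". The shared consumer index is only re-read when fewer
 * than dcnt slots appear to be free.
 */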
static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
{
	u32 free_entries = q->nentries - (producer - q->cons_tail);

	if (free_entries >= dcnt)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cons_tail = READ_ONCE(q->ring->consumer);
	return q->nentries - (producer - q->cons_tail);
}

/* UMEM queue */

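/* An address is valid if it falls inside the registered umem area.
 * Invalid addresses are accounted in invalid_descs.
 */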
static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr)
{
	if (addr >= q->umem_props.size) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

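/* Walk the cached [cons_tail, cons_head) window and return the first
 * valid chunk address, skipping (and counting) invalid entries.
 */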
static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		*addr = READ_ONCE(ring->desc[idx]) & q->umem_props.chunk_mask;
		if (xskq_is_valid_addr(q, *addr))
			return addr;

		q->cons_tail++;
	}

	return NULL;
}

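/* Peek at the next address without consuming it. When the cached
 * window is empty, the consumer index is published and up to
 * RX_BATCH_SIZE new entries are pulled in; the smp_rmb() orders the
 * read of the producer index against the reads of the descriptors.
 *
 * Sketch of typical consumer-side use (the real call sites are in
 * net/xdp/xsk.c):
 *
 *	u64 addr;
 *
 *	if (!xskq_peek_addr(q, &addr))
 *		return -ENOSPC;
 *	... copy the frame into the chunk at addr ...
 *	xskq_discard_addr(q);
 */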
static inline u64 *xskq_peek_addr(struct xsk_queue *q, u64 *addr)
{
	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();
	}

	return xskq_validate_addr(q, addr);
}

static inline void xskq_discard_addr(struct xsk_queue *q)
{
	q->cons_tail++;
}

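/* Publish a single address (e.g. to the completion ring when a Tx
 * descriptor has been sent). The smp_wmb() makes sure the entry is
 * written before the producer index that makes it visible.
 */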
static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	ring->desc[q->prod_tail++ & q->ring_mask] = addr;

	/* Order producer and data */
	smp_wmb();

	WRITE_ONCE(q->ring->producer, q->prod_tail);
	return 0;
}

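/* Reserve one slot in the producer's local view without publishing
 * anything; returns -ENOSPC if the ring is full. A later
 * xskq_produce_addr() fills and publishes the reserved slot.
 */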
static inline int xskq_reserve_addr(struct xsk_queue *q)
{
	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	q->prod_head++;
	return 0;
}

/* Rx/Tx queue */

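/* An Rx/Tx descriptor is valid if its address lies inside the umem and
 * the described buffer does not cross a chunk boundary.
 */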
static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
{
	if (!xskq_is_valid_addr(q, d->addr))
		return false;

	if (((d->addr + d->len) & q->umem_props.chunk_mask) !=
	    (d->addr & q->umem_props.chunk_mask)) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

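/* Rx/Tx-descriptor counterpart of xskq_validate_addr(). */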
static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
						  struct xdp_desc *desc)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		*desc = READ_ONCE(ring->desc[idx]);
		if (xskq_is_valid_desc(q, desc))
			return desc;

		q->cons_tail++;
	}

	return NULL;
}

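/* Rx/Tx-descriptor counterpart of xskq_peek_addr(). */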
static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
					      struct xdp_desc *desc)
{
	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();
	}

	return xskq_validate_desc(q, desc);
}

static inline void xskq_discard_desc(struct xsk_queue *q)
{
	q->cons_tail++;
}

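/* Queue one addr/len descriptor at the producer's local head. The
 * entry only becomes visible to the other side once
 * xskq_produce_flush_desc() publishes the producer index.
 */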
static inline int xskq_produce_batch_desc(struct xsk_queue *q,
					  u64 addr, u32 len)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	unsigned int idx;

	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	idx = (q->prod_head++) & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;

	return 0;
}

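/* Publish all descriptors queued with xskq_produce_batch_desc() since
 * the last flush. The smp_wmb() orders the descriptor writes before
 * the update of the shared producer index.
 */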
static inline void xskq_produce_flush_desc(struct xsk_queue *q)
{
	/* Order producer and data */
	smp_wmb();

	q->prod_tail = q->prod_head;
	WRITE_ONCE(q->ring->producer, q->prod_tail);
}

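/* True if every slot in the ring holds an unconsumed entry. */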
static inline bool xskq_full_desc(struct xsk_queue *q)
{
	return xskq_nb_avail(q, q->nentries) == q->nentries;
}

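/* True if the ring currently holds no unconsumed entries. */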
static inline bool xskq_empty_desc(struct xsk_queue *q)
{
	return xskq_nb_free(q, q->prod_tail, 1) == q->nentries;
}

void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */