/* xen-netback common.h */
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __XEN_NETBACK__COMMON_H__
#define __XEN_NETBACK__COMMON_H__

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/grant_table.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/page.h>

#include <linux/debugfs.h>

typedef unsigned int pending_ring_idx_t;
#define INVALID_PENDING_RING_IDX (~0U)

struct pending_tx_info {
54
	struct xen_netif_tx_request req; /* tx request */
55
	unsigned int extra_count;
56 57 58 59 60 61 62 63 64 65 66
	/* Callback data for released SKBs. The callback is always
	 * xenvif_zerocopy_callback, desc contains the pending_idx, which is
	 * also an index in pending_tx_info array. It is initialized in
	 * xenvif_alloc and it never changes.
	 * skb_shinfo(skb)->destructor_arg points to the first mapped slot's
	 * callback_struct in this array of struct pending_tx_info's, then ctx
	 * to the next, or NULL if there is no more slot for this skb.
	 * ubuf_to_vif is a helper which finds the struct xenvif from a pointer
	 * to this field.
	 */
	struct ubuf_info callback_struct;
67 68
};

#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)

/* Metadata kept for each guest RX ring request consumed (see the
 * 'meta' array in struct xenvif_queue).
 */
struct xenvif_rx_meta {
	int id;
	int size;
	int gso_type;
	int gso_size;
};

#define GSO_BIT(type) \
	(1 << XEN_NETIF_GSO_TYPE_ ## type)

/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF

#define MAX_BUFFER_OFFSET XEN_PAGE_SIZE

#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE

/* The maximum number of frags is derived from the size of a grant (same
 * as a Xen page size for now).
 */
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)

/* It's possible for an skb to have a maximal number of frags
 * but still be less than MAX_BUFFER_OFFSET in size. Thus the
 * worst-case number of copy operations is MAX_XEN_SKB_FRAGS per
 * ring slot.
 */
#define MAX_GRANT_COPY_OPS (MAX_XEN_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)

#define NETBACK_INVALID_HANDLE -1

/* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
 * the maximum slots a valid packet can use. Now this value is defined
 * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
 * all backend.
 */
#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN

/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 5)

/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)

struct xenvif;

struct xenvif_stats {
	/* Stats fields to be updated per-queue.
	 * A subset of struct net_device_stats that contains only the
	 * fields that are updated in netback.c for each queue.
	 */
	unsigned int rx_bytes;
	unsigned int rx_packets;
	unsigned int tx_bytes;
	unsigned int tx_packets;

	/* Additional stats used by xenvif */
	unsigned long rx_gso_checksum_fixup;
	unsigned long tx_zerocopy_sent;
	unsigned long tx_zerocopy_success;
	unsigned long tx_zerocopy_fail;
	unsigned long tx_frag_overflow;
};

struct xenvif_queue { /* Per-queue data for xenvif */
	unsigned int id; /* Queue ID, 0-based */
	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
	struct xenvif *vif; /* Parent VIF */
140

141 142 143 144 145
	/* Use NAPI for guest TX */
	struct napi_struct napi;
	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
	unsigned int tx_irq;
	/* Only used when feature-split-event-channels = 1 */
146
	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
147 148 149 150 151 152 153
	struct xen_netif_tx_back_ring tx;
	struct sk_buff_head tx_queue;
	struct page *mmap_pages[MAX_PENDING_REQS];
	pending_ring_idx_t pending_prod;
	pending_ring_idx_t pending_cons;
	u16 pending_ring[MAX_PENDING_REQS];
	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
154
	grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
155

156
	struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174
	struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
	struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
	/* passed to gnttab_[un]map_refs with pages under (un)mapping */
	struct page *pages_to_map[MAX_PENDING_REQS];
	struct page *pages_to_unmap[MAX_PENDING_REQS];

	/* This prevents zerocopy callbacks  to race over dealloc_ring */
	spinlock_t callback_lock;
	/* This prevents dealloc thread and NAPI instance to race over response
	 * creation and pending_ring in xenvif_idx_release. In xenvif_tx_err
	 * it only protect response creation
	 */
	spinlock_t response_lock;
	pending_ring_idx_t dealloc_prod;
	pending_ring_idx_t dealloc_cons;
	u16 dealloc_ring[MAX_PENDING_REQS];
	struct task_struct *dealloc_task;
	wait_queue_head_t dealloc_wq;
175
	atomic_t inflight_packets;
I
Ian Campbell 已提交
176

177 178 179
	/* Use kthread for guest RX */
	struct task_struct *task;
	wait_queue_head_t wq;
180 181 182
	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
	unsigned int rx_irq;
	/* Only used when feature-split-event-channels = 1 */
183
	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
184 185
	struct xen_netif_rx_back_ring rx;
	struct sk_buff_head rx_queue;
186

187 188
	unsigned int rx_queue_max;
	unsigned int rx_queue_len;
189 190
	unsigned long last_rx_time;
	bool stalled;
191

192
	struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS];
I
Ian Campbell 已提交
193

194 195 196 197
	/* We create one meta structure per ring request we consume, so
	 * the maximum number is the same as the ring size.
	 */
	struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
198

199 200 201 202 203 204 205 206 207 208 209
	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
	unsigned long   credit_bytes;
	unsigned long   credit_usec;
	unsigned long   remaining_credit;
	struct timer_list credit_timeout;
	u64 credit_window_start;

	/* Statistics */
	struct xenvif_stats stats;
};

/* Bit positions used in xenvif->status. */
enum state_bit_shift {
	/* This bit marks that the vif is connected */
	VIF_STATUS_CONNECTED,
};

215 216 217 218 219 220 221 222
struct xenvif_mcast_addr {
	struct list_head entry;
	struct rcu_head rcu;
	u8 addr[6];
};

#define XEN_NETBK_MCAST_MAX 64

223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251
#define XEN_NETBK_MAX_HASH_KEY_SIZE 40
#define XEN_NETBK_MAX_HASH_MAPPING_SIZE 128
#define XEN_NETBK_HASH_TAG_SIZE 40

struct xenvif_hash_cache_entry {
	struct list_head link;
	struct rcu_head rcu;
	u8 tag[XEN_NETBK_HASH_TAG_SIZE];
	unsigned int len;
	u32 val;
	int seq;
};

/* Cache of hash values computed for this vif; the list of
 * xenvif_hash_cache_entry is protected by 'lock'.
 */
struct xenvif_hash_cache {
	spinlock_t lock;
	struct list_head list;
	unsigned int count;
	atomic_t seq;
};

/* Per-vif hash state: algorithm, key and queue-mapping table.
 * NOTE(review): presumably configured via the xenvif_set_hash_*
 * handlers declared below — confirm against their implementation.
 */
struct xenvif_hash {
	unsigned int alg;
	u32 flags;
	u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
	u32 mapping[XEN_NETBK_MAX_HASH_MAPPING_SIZE];
	unsigned int size;
	struct xenvif_hash_cache cache;
};

252 253 254 255 256
struct xenvif {
	/* Unique identifier for this interface. */
	domid_t          domid;
	unsigned int     handle;

257
	u8               fe_dev_addr[6];
258 259
	struct list_head fe_mcast_addr;
	unsigned int     fe_mcast_count;
I
Ian Campbell 已提交
260 261

	/* Frontend feature information. */
262 263 264
	int gso_mask;
	int gso_prefix_mask;

I
Ian Campbell 已提交
265
	u8 can_sg:1;
266 267
	u8 ip_csum:1;
	u8 ipv6_csum:1;
268
	u8 multicast_control:1;
I
Ian Campbell 已提交
269

270 271 272 273
	/* Is this interface disabled? True when backend discovers
	 * frontend is rogue.
	 */
	bool disabled;
274
	unsigned long status;
275 276
	unsigned long drain_timeout;
	unsigned long stall_timeout;
I
Ian Campbell 已提交
277

278 279
	/* Queues */
	struct xenvif_queue *queues;
280
	unsigned int num_queues; /* active queues, resource allocated */
281 282
	unsigned int stalled_queues;

283 284
	struct xenvif_hash hash;

285
	struct xenbus_watch credit_watch;
286
	struct xenbus_watch mcast_ctrl_watch;
287

288
	spinlock_t lock;
I
Ian Campbell 已提交
289

290 291 292 293
#ifdef CONFIG_DEBUG_FS
	struct dentry *xenvif_dbg_root;
#endif

294 295 296
	struct xen_netif_ctrl_back_ring ctrl;
	unsigned int ctrl_irq;

I
Ian Campbell 已提交
297 298 299 300
	/* Miscellaneous private stuff. */
	struct net_device *dev;
};

/* Private per-skb state kept in skb->cb on the guest RX path. */
struct xenvif_rx_cb {
	unsigned long expires;
	int meta_slots_used;
};

#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)

308 309 310 311 312
static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
{
	return to_xenbus_device(vif->dev->dev.parent);
}

void xenvif_tx_credit_callback(unsigned long data);

struct xenvif *xenvif_alloc(struct device *parent,
			    domid_t domid,
			    unsigned int handle);

int xenvif_init_queue(struct xenvif_queue *queue);
void xenvif_deinit_queue(struct xenvif_queue *queue);

int xenvif_connect_data(struct xenvif_queue *queue,
			unsigned long tx_ring_ref,
			unsigned long rx_ring_ref,
			unsigned int tx_evtchn,
			unsigned int rx_evtchn);
void xenvif_disconnect_data(struct xenvif *vif);
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
			unsigned int evtchn);
void xenvif_disconnect_ctrl(struct xenvif *vif);
void xenvif_free(struct xenvif *vif);

int xenvif_xenbus_init(void);
void xenvif_xenbus_fini(void);

int xenvif_schedulable(struct xenvif *vif);

int xenvif_queue_stopped(struct xenvif_queue *queue);
void xenvif_wake_queue(struct xenvif_queue *queue);

/* (Un)Map communication rings. */
void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
				   grant_ref_t tx_ring_ref,
				   grant_ref_t rx_ring_ref);

/* Check for SKBs from frontend and schedule backend processing */
void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);

/* Prevent the device from generating any further traffic. */
void xenvif_carrier_off(struct xenvif *vif);

int xenvif_tx_action(struct xenvif_queue *queue, int budget);

int xenvif_kthread_guest_rx(void *data);
void xenvif_kick_thread(struct xenvif_queue *queue);

int xenvif_dealloc_kthread(void *data);

irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);

void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);

void xenvif_carrier_on(struct xenvif *vif);

/* Callback from stack when TX packet can be released */
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);

/* Unmap a pending page and release it back to the guest */
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);

372
static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
373 374
{
	return MAX_PENDING_REQS -
375
		queue->pending_prod + queue->pending_cons;
376 377
}

irqreturn_t xenvif_interrupt(int irq, void *dev_id);

extern bool separate_tx_rx_irq;

extern unsigned int rx_drain_timeout_msecs;
extern unsigned int rx_stall_timeout_msecs;
extern unsigned int xenvif_max_queues;
extern unsigned int xenvif_hash_cache_size;

#ifdef CONFIG_DEBUG_FS
extern struct dentry *xen_netback_dbg_root;
#endif

void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb);
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);

/* Multicast control */
bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr);
void xenvif_mcast_addr_list_free(struct xenvif *vif);

/* Hash */
void xenvif_init_hash(struct xenvif *vif);
void xenvif_deinit_hash(struct xenvif *vif);

u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg);
u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags);
u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags);
u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len);
u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size);
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
			    u32 off);

void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb);

#endif /* __XEN_NETBACK__COMMON_H__ */