/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
#include <linux/highmem.h>

#include <net/tcp.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>

/* Provide an option to disable split event channels at load time as
 * event channels are a limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = true;
module_param(separate_tx_rx_irq, bool, 0644);

/* The time that packets can stay on the guest Rx internal queue
 * before they are dropped.
 */
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);

/* The length of time before the frontend is considered unresponsive
 * because it isn't providing Rx slots.
 */
unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);

unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");

/*
 * This is the maximum number of slots a skb can have. If a guest sends a skb
 * which exceeds this limit it is considered malicious.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);

/* The amount to copy out of the first guest Tx slot into the skb's
 * linear area.  If the first slot has more data, it will be mapped
 * and put into the first frag.
 *
 * This is sized to avoid pulling headers from the frags for most
 * TCP/IP packets.
 */
#define XEN_NETBACK_TX_COPY_LEN 128

static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status);

static void make_tx_response(struct xenvif_queue *queue,
			     struct xen_netif_tx_request *txp,
			     s8       st);

static inline int tx_work_todo(struct xenvif_queue *queue);

static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
					     u16      id,
					     s8       st,
					     u16      offset,
					     u16      size,
					     u16      flags);

static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
				       u16 idx)
{
	return page_to_pfn(queue->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
}

#define callback_param(vif, pending_idx) \
	(vif->pending_tx_info[pending_idx].callback_struct)

/* Find the containing VIF's structure from a pointer in pending_tx_info array
 */
static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
{
	u16 pending_idx = ubuf->desc;
	struct pending_tx_info *temp =
		container_of(ubuf, struct pending_tx_info, callback_struct);
	return container_of(temp - pending_idx,
			    struct xenvif_queue,
			    pending_tx_info[0]);
}

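/* While a Tx frag is only a pending slot, its index is stashed in the
 * frag's page_offset field; xenvif_fill_frags() later replaces it with
 * the real page and offset.
 */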
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
	return (u16)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
	frag->page_offset = pending_idx;
}

static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS-1);
}

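/* Check whether the frontend has posted enough Rx requests to cover
 * 'needed' more slots.  If not, arm req_event so the frontend notifies
 * us when more requests arrive, then re-check to close the race with
 * requests being posted concurrently.
 */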
bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
{
	RING_IDX prod, cons;

	do {
		prod = queue->rx.sring->req_prod;
		cons = queue->rx.req_cons;

		if (prod - cons >= needed)
			return true;

		queue->rx.sring->req_event = prod + 1;

		/* Make sure event is visible before we check prod
		 * again.
		 */
		mb();
	} while (queue->rx.sring->req_prod != prod);

	return false;
}

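/* Queue an skb for transmission to the frontend, charging its length
 * against the queue's Rx budget; the net device's tx queue is stopped
 * once rx_queue_max is exceeded.
 */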
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->rx_queue.lock, flags);

	__skb_queue_tail(&queue->rx_queue, skb);

	queue->rx_queue_len += skb->len;
	if (queue->rx_queue_len > queue->rx_queue_max)
		netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));

	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
}

static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	spin_lock_irq(&queue->rx_queue.lock);

	skb = __skb_dequeue(&queue->rx_queue);
	if (skb)
		queue->rx_queue_len -= skb->len;

	spin_unlock_irq(&queue->rx_queue.lock);

	return skb;
}

static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
{
	spin_lock_irq(&queue->rx_queue.lock);

	if (queue->rx_queue_len < queue->rx_queue_max)
		netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));

	spin_unlock_irq(&queue->rx_queue.lock);
}


static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
	struct sk_buff *skb;
	while ((skb = xenvif_rx_dequeue(queue)) != NULL)
		kfree_skb(skb);
}

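/* Drop skbs from the head of the internal Rx queue once their expiry
 * time (XENVIF_RX_CB(skb)->expires) has passed, bounding how long a
 * packet may wait for the frontend to provide Rx slots (see
 * rx_drain_timeout_msecs above).
 */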
static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	for (;;) {
		skb = skb_peek(&queue->rx_queue);
		if (!skb)
			break;
		if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
			break;
		xenvif_rx_dequeue(queue);
		kfree_skb(skb);
	}
}

/*
 * Returns true if we should start a new receive buffer instead of
 * adding 'size' bytes to a buffer which currently contains 'offset'
 * bytes.
 */
static bool start_new_rx_buffer(int offset, unsigned long size, int head,
				bool full_coalesce)
{
	/* simple case: we have completely filled the current buffer. */
	if (offset == MAX_BUFFER_OFFSET)
		return true;

	/*
	 * complex case: start a fresh buffer if the current frag
	 * would overflow the current buffer but only if:
	 *     (i)   this frag would fit completely in the next buffer
	 * and (ii)  there is already some data in the current buffer
	 * and (iii) this is not the head buffer.
	 * and (iv)  there is no need to fully utilize the buffers
	 *
	 * Where:
	 * - (i) stops us splitting a frag into two copies
	 *   unless the frag is too large for a single buffer.
	 * - (ii) stops us from leaving a buffer pointlessly empty.
	 * - (iii) stops us leaving the first buffer
	 *   empty. Strictly speaking this is already covered
	 *   by (ii) but is explicitly checked because
	 *   netfront relies on the first buffer being
	 *   non-empty and can crash otherwise.
	 * - (iv) is needed for skbs which can use up more than MAX_SKB_FRAGS
	 *   slots
	 *
	 * This means we will effectively linearise small
	 * frags but do not needlessly split large buffers
	 * into multiple copies, tending to give large frags
	 * their own buffers as before.
	 */
	BUG_ON(size > MAX_BUFFER_OFFSET);
	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
	    !full_coalesce)
		return true;

	return false;
}

struct netrx_pending_operations {
	unsigned copy_prod, copy_cons;
	unsigned meta_prod, meta_cons;
	struct gnttab_copy *copy;
	struct xenvif_rx_meta *meta;
	int copy_off;
	grant_ref_t copy_gref;
};

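/* Consume the next Rx ring request and start a fresh meta slot for it,
 * resetting the copy offset and destination grant ref.
 */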
static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
						 struct netrx_pending_operations *npo)
{
	struct xenvif_rx_meta *meta;
	struct xen_netif_rx_request *req;

	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);

	meta = npo->meta + npo->meta_prod++;
	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
	meta->gso_size = 0;
	meta->size = 0;
	meta->id = req->id;

	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	return meta;
}

/*
 * Set up the grant operations for this fragment. If it's a flipping
 * interface, we also set up the unmap request from here.
 */
static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
				 struct netrx_pending_operations *npo,
				 struct page *page, unsigned long size,
				 unsigned long offset, int *head,
				 struct xenvif_queue *foreign_queue,
				 grant_ref_t foreign_gref)
{
	struct gnttab_copy *copy_gop;
	struct xenvif_rx_meta *meta;
	unsigned long bytes;
	int gso_type = XEN_NETIF_GSO_TYPE_NONE;

	/* Data must not cross a page boundary. */
	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));

	meta = npo->meta + npo->meta_prod - 1;

	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (size > 0) {
		BUG_ON(offset >= PAGE_SIZE);
		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);

		bytes = PAGE_SIZE - offset;

		if (bytes > size)
			bytes = size;

		if (start_new_rx_buffer(npo->copy_off,
					bytes,
					*head,
					XENVIF_RX_CB(skb)->full_coalesce)) {
			/*
			 * Netfront requires there to be some data in the head
			 * buffer.
			 */
			BUG_ON(*head);

			meta = get_next_rx_buffer(queue, npo);
		}

		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
			bytes = MAX_BUFFER_OFFSET - npo->copy_off;

		copy_gop = npo->copy + npo->copy_prod++;
		copy_gop->flags = GNTCOPY_dest_gref;
		copy_gop->len = bytes;

		if (foreign_queue) {
			copy_gop->source.domid = foreign_queue->vif->domid;
			copy_gop->source.u.ref = foreign_gref;
			copy_gop->flags |= GNTCOPY_source_gref;
		} else {
			copy_gop->source.domid = DOMID_SELF;
			copy_gop->source.u.gmfn =
				virt_to_mfn(page_address(page));
		}
		copy_gop->source.offset = offset;

		copy_gop->dest.domid = queue->vif->domid;
		copy_gop->dest.offset = npo->copy_off;
		copy_gop->dest.u.ref = npo->copy_gref;

		npo->copy_off += bytes;
		meta->size += bytes;

		offset += bytes;
		size -= bytes;

		/* Next frame */
		if (offset == PAGE_SIZE && size) {
			BUG_ON(!PageCompound(page));
			page++;
			offset = 0;
		}

		/* Leave a gap for the GSO descriptor. */
		if (skb_is_gso(skb)) {
			if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
				gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
			else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
				gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
		}

		if (*head && ((1 << gso_type) & queue->vif->gso_mask))
			queue->rx.req_cons++;

		*head = 0; /* There must be something in this buffer now. */

	}
}

/*
 * Find the grant ref for a given frag in a chain of struct ubuf_info's
 * skb: the skb itself
 * i: the frag's number
 * ubuf: a pointer to an element in the chain. It should not be NULL
 *
 * Returns a pointer to the element in the chain where the page was found. If
 * not found, returns NULL.
 * See the definition of callback_struct in common.h for more details about
 * the chain.
 */
static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
						const int i,
						const struct ubuf_info *ubuf)
{
	struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf);

	do {
		u16 pending_idx = ubuf->desc;

		if (skb_shinfo(skb)->frags[i].page.p ==
		    foreign_queue->mmap_pages[pending_idx])
			break;
		ubuf = (struct ubuf_info *) ubuf->ctx;
	} while (ubuf);

	return ubuf;
}

/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int xenvif_gop_skb(struct sk_buff *skb,
			  struct netrx_pending_operations *npo,
			  struct xenvif_queue *queue)
{
	struct xenvif *vif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
	struct xen_netif_rx_request *req;
	struct xenvif_rx_meta *meta;
	unsigned char *data;
	int head = 1;
	int old_meta_prod;
	int gso_type;
	const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
	const struct ubuf_info *const head_ubuf = ubuf;

	old_meta_prod = npo->meta_prod;

	gso_type = XEN_NETIF_GSO_TYPE_NONE;
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
	}

	/* Set up a GSO prefix descriptor, if necessary */
	if ((1 << gso_type) & vif->gso_prefix_mask) {
		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
		meta = npo->meta + npo->meta_prod++;
		meta->gso_type = gso_type;
		meta->gso_size = skb_shinfo(skb)->gso_size;
		meta->size = 0;
		meta->id = req->id;
	}

	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
	meta = npo->meta + npo->meta_prod++;

	if ((1 << gso_type) & vif->gso_mask) {
		meta->gso_type = gso_type;
		meta->gso_size = skb_shinfo(skb)->gso_size;
	} else {
		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
		meta->gso_size = 0;
	}

	meta->size = 0;
	meta->id = req->id;
	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned int offset = offset_in_page(data);
		unsigned int len = PAGE_SIZE - offset;

		if (data + len > skb_tail_pointer(skb))
			len = skb_tail_pointer(skb) - data;

		xenvif_gop_frag_copy(queue, skb, npo,
				     virt_to_page(data), len, offset, &head,
				     NULL,
				     0);
		data += len;
	}

	for (i = 0; i < nr_frags; i++) {
		/* This variable also signals whether foreign_gref has a real
		 * value or not.
		 */
		struct xenvif_queue *foreign_queue = NULL;
		grant_ref_t foreign_gref;

		if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
			(ubuf->callback == &xenvif_zerocopy_callback)) {
			const struct ubuf_info *const startpoint = ubuf;

			/* Ideally ubuf points to the chain element which
			 * belongs to this frag. Or if frags were removed from
			 * the beginning, then shortly before it.
			 */
			ubuf = xenvif_find_gref(skb, i, ubuf);

			/* Try again from the beginning of the list, if we
			 * haven't tried from there. This only makes sense in
			 * the unlikely event of reordering the original frags.
			 * For injected local pages it's an unnecessary second
			 * run.
			 */
			if (unlikely(!ubuf) && startpoint != head_ubuf)
				ubuf = xenvif_find_gref(skb, i, head_ubuf);

			if (likely(ubuf)) {
				u16 pending_idx = ubuf->desc;

				foreign_queue = ubuf_to_queue(ubuf);
				foreign_gref =
					foreign_queue->pending_tx_info[pending_idx].req.gref;
				/* Just a safety measure. If this was the last
				 * element on the list, the for loop will
				 * iterate again if a local page were added to
				 * the end. Using head_ubuf here prevents the
				 * second search on the chain. Or the original
				 * frags changed order, but that's less likely.
				 * In any case, ubuf shouldn't be NULL.
				 */
				ubuf = ubuf->ctx ?
					(struct ubuf_info *) ubuf->ctx :
					head_ubuf;
			} else
				/* This frag was a local page, added to the
				 * array after the skb left netback.
				 */
				ubuf = head_ubuf;
		}
		xenvif_gop_frag_copy(queue, skb, npo,
				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
				     skb_shinfo(skb)->frags[i].page_offset,
				     &head,
				     foreign_queue,
				     foreign_queue ? foreign_gref : UINT_MAX);
	}

	return npo->meta_prod - old_meta_prod;
}

/*
 * This is a twin to xenvif_gop_skb.  Assume that xenvif_gop_skb was
 * used to set up the operations on the top of
 * netrx_pending_operations, which have since been done.  Check that
 * they didn't give any errors and advance over them.
 */
static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
			    struct netrx_pending_operations *npo)
{
	struct gnttab_copy     *copy_op;
	int status = XEN_NETIF_RSP_OKAY;
	int i;

	for (i = 0; i < nr_meta_slots; i++) {
		copy_op = npo->copy + npo->copy_cons++;
		if (copy_op->status != GNTST_okay) {
			netdev_dbg(vif->dev,
				   "Bad status %d from copy to DOM%d.\n",
				   copy_op->status, vif->domid);
			status = XEN_NETIF_RSP_ERROR;
		}
	}

	return status;
}

static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
				      struct xenvif_rx_meta *meta,
				      int nr_meta_slots)
{
	int i;
	unsigned long offset;

	/* No fragments used */
	if (nr_meta_slots <= 1)
		return;

	nr_meta_slots--;

	for (i = 0; i < nr_meta_slots; i++) {
		int flags;
		if (i == nr_meta_slots - 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		offset = 0;
		make_rx_response(queue, meta[i].id, status, offset,
				 meta[i].size, flags);
	}
}

void xenvif_kick_thread(struct xenvif_queue *queue)
{
	wake_up(&queue->wq);
}

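/* Drain the internal Rx queue into the shared ring: take a worst-case
 * estimate of the slots each skb needs, build the grant copy
 * operations, issue them as a single batch and then write the
 * responses back to the frontend.
 */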
static void xenvif_rx_action(struct xenvif_queue *queue)
{
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	LIST_HEAD(notify);
	int ret;
	unsigned long offset;
	bool need_to_notify = false;

	struct netrx_pending_operations npo = {
		.copy  = queue->grant_copy_op,
		.meta  = queue->meta,
	};

	skb_queue_head_init(&rxq);

	while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
	       && (skb = xenvif_rx_dequeue(queue)) != NULL) {
		RING_IDX max_slots_needed;
		RING_IDX old_req_cons;
		RING_IDX ring_slots_used;
		int i;

		queue->last_rx_time = jiffies;

		/* We need a cheap worst-case estimate for the number of
		 * slots we'll use.
		 */

		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
						skb_headlen(skb),
						PAGE_SIZE);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			unsigned int size;
			unsigned int offset;

			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
			offset = skb_shinfo(skb)->frags[i].page_offset;

			/* For a worst-case estimate we need to factor in
			 * the fragment page offset as this will affect the
			 * number of times xenvif_gop_frag_copy() will
			 * call start_new_rx_buffer().
			 */
			max_slots_needed += DIV_ROUND_UP(offset + size,
							 PAGE_SIZE);
		}

		/* To avoid the estimate becoming too pessimal for some
		 * frontends that limit posted rx requests, cap the estimate
		 * at MAX_SKB_FRAGS. In this case netback will fully coalesce
		 * the skb into the provided slots.
		 */
		if (max_slots_needed > MAX_SKB_FRAGS) {
			max_slots_needed = MAX_SKB_FRAGS;
			XENVIF_RX_CB(skb)->full_coalesce = true;
		} else {
			XENVIF_RX_CB(skb)->full_coalesce = false;
		}

		/* We may need one more slot for GSO metadata */
		if (skb_is_gso(skb) &&
		   (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
			max_slots_needed++;

		old_req_cons = queue->rx.req_cons;
		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
		ring_slots_used = queue->rx.req_cons - old_req_cons;

		BUG_ON(ring_slots_used > max_slots_needed);

		__skb_queue_tail(&rxq, skb);
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));

	if (!npo.copy_prod)
		goto done;

	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
	gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {

		if ((1 << queue->meta[npo.meta_cons].gso_type) &
		    queue->vif->gso_prefix_mask) {
			resp = RING_GET_RESPONSE(&queue->rx,
						 queue->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

			resp->offset = queue->meta[npo.meta_cons].gso_size;
			resp->id = queue->meta[npo.meta_cons].id;
			resp->status = XENVIF_RX_CB(skb)->meta_slots_used;

			npo.meta_cons++;
			XENVIF_RX_CB(skb)->meta_slots_used--;
		}

		queue->stats.tx_bytes += skb->len;
		queue->stats.tx_packets++;

		status = xenvif_check_gop(queue->vif,
					  XENVIF_RX_CB(skb)->meta_slots_used,
					  &npo);

		if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
					status, offset,
					queue->meta[npo.meta_cons].size,
					flags);

		if ((1 << queue->meta[npo.meta_cons].gso_type) &
		    queue->vif->gso_mask) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&queue->rx,
						  queue->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
			gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
			gso->u.gso.pad = 0;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		xenvif_add_frag_responses(queue, status,
					  queue->meta + npo.meta_cons + 1,
					  XENVIF_RX_CB(skb)->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);

		need_to_notify |= !!ret;

		npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
		dev_kfree_skb(skb);
	}

done:
	if (need_to_notify)
		notify_remote_via_irq(queue->rx_irq);
}

void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&queue->napi);
}

static void tx_add_credit(struct xenvif_queue *queue)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
	max_burst = min(max_burst, 131072UL);
	max_burst = max(max_burst, queue->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = queue->remaining_credit + queue->credit_bytes;
	if (max_credit < queue->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	queue->remaining_credit = min(max_credit, max_burst);
}

static void tx_credit_callback(unsigned long data)
{
	struct xenvif_queue *queue = (struct xenvif_queue *)data;
	tx_add_credit(queue);
	xenvif_napi_schedule_or_enable_events(queue);
}

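/* Complete a (possibly multi-slot) Tx request chain with an error
 * status, consuming every slot in the chain up to 'end'.
 */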
static void xenvif_tx_err(struct xenvif_queue *queue,
			  struct xen_netif_tx_request *txp, RING_IDX end)
{
	RING_IDX cons = queue->tx.req_cons;
	unsigned long flags;

	do {
		spin_lock_irqsave(&queue->response_lock, flags);
		make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
		spin_unlock_irqrestore(&queue->response_lock, flags);
		if (cons == end)
			break;
		txp = RING_GET_REQUEST(&queue->tx, cons++);
	} while (1);
	queue->tx.req_cons = cons;
}

static void xenvif_fatal_tx_err(struct xenvif *vif)
{
	netdev_err(vif->dev, "fatal error; disabling device\n");
	vif->disabled = true;
	/* Disable the vif from queue 0's kthread */
	if (vif->queues)
		xenvif_kick_thread(&vif->queues[0]);
}

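/* Walk the XEN_NETTXF_more_data chain that follows 'first', copying
 * the slots into txp.  Returns the number of extra slots consumed, or
 * a negative errno if the chain is malformed (oversized packets are
 * either dropped or treated as a fatal frontend error).
 */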
static int xenvif_count_requests(struct xenvif_queue *queue,
				 struct xen_netif_tx_request *first,
				 struct xen_netif_tx_request *txp,
				 int work_to_do)
{
	RING_IDX cons = queue->tx.req_cons;
	int slots = 0;
	int drop_err = 0;
	int more_data;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		struct xen_netif_tx_request dropped_tx = { 0 };

		if (slots >= work_to_do) {
			netdev_err(queue->vif->dev,
				   "Asked for %d slots but exceeds this limit\n",
				   work_to_do);
			xenvif_fatal_tx_err(queue->vif);
			return -ENODATA;
		}

		/* This guest is really using too many slots and
		 * considered malicious.
		 */
		if (unlikely(slots >= fatal_skb_slots)) {
			netdev_err(queue->vif->dev,
				   "Malicious frontend using %d slots, threshold %u\n",
				   slots, fatal_skb_slots);
			xenvif_fatal_tx_err(queue->vif);
			return -E2BIG;
		}

		/* Xen network protocol had implicit dependency on
		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
		 * the historical MAX_SKB_FRAGS value 18 to honor the
		 * same behavior as before. Any packet using more than
		 * 18 slots but less than fatal_skb_slots slots is
		 * dropped.
		 */
		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
			drop_err = -E2BIG;
		}

		if (drop_err)
			txp = &dropped_tx;

		memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
		       sizeof(*txp));

		/* If the guest submitted a frame >= 64 KiB then
		 * first->size overflowed and following slots will
		 * appear to be larger than the frame.
		 *
		 * This cannot be a fatal error as there are buggy
		 * frontends that do this.
		 *
		 * Consume all slots and drop the packet.
		 */
		if (!drop_err && txp->size > first->size) {
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Invalid tx request, slot size %u > remaining size %u\n",
					   txp->size, first->size);
			drop_err = -EIO;
		}

		first->size -= txp->size;
		slots++;

		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
				 txp->offset, txp->size);
			xenvif_fatal_tx_err(queue->vif);
			return -EINVAL;
		}

		more_data = txp->flags & XEN_NETTXF_more_data;

		if (!drop_err)
			txp++;

	} while (more_data);

	if (drop_err) {
		xenvif_tx_err(queue, first, cons + slots);
		return drop_err;
	}

	return slots;
}

struct xenvif_tx_cb {
	u16 pending_idx;
};

#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)

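/* Record the page backing 'pending_idx', fill in the grant map
 * operation for it and keep a copy of the Tx request for the later
 * response.
 */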
static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
					  u16 pending_idx,
					  struct xen_netif_tx_request *txp,
					  struct gnttab_map_grant_ref *mop)
{
	queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
			  GNTMAP_host_map | GNTMAP_readonly,
			  txp->gref, queue->vif->domid);

	memcpy(&queue->pending_tx_info[pending_idx].req, txp,
	       sizeof(*txp));
}

static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
{
	struct sk_buff *skb =
		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
			  GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(skb == NULL))
		return NULL;

	/* Packets passed to netif_rx() must have some headroom. */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

	/* Initialize it here to avoid later surprises */
	skb_shinfo(skb)->destructor_arg = NULL;

	return skb;
}

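/* Allocate pending slots and grant map operations for every frag of
 * the skb; frags beyond MAX_SKB_FRAGS spill over into a freshly
 * allocated frag_list skb.  Returns the next unused map op, or NULL
 * if the frag_list skb could not be allocated.
 */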
static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
							struct sk_buff *skb,
							struct xen_netif_tx_request *txp,
							struct gnttab_map_grant_ref *gop)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
	int start;
	pending_ring_idx_t index;
	unsigned int nr_slots, frag_overflow = 0;

	/* At this point shinfo->nr_frags is in fact the number of
	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
	 */
	if (shinfo->nr_frags > MAX_SKB_FRAGS) {
		frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
		BUG_ON(frag_overflow > MAX_SKB_FRAGS);
		shinfo->nr_frags = MAX_SKB_FRAGS;
	}
	nr_slots = shinfo->nr_frags;

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
	     shinfo->nr_frags++, txp++, gop++) {
		index = pending_index(queue->pending_cons++);
		pending_idx = queue->pending_ring[index];
		xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
	}

	if (frag_overflow) {
		struct sk_buff *nskb = xenvif_alloc_skb(0);
		if (unlikely(nskb == NULL)) {
			if (net_ratelimit())
				netdev_err(queue->vif->dev,
					   "Can't allocate the frag_list skb.\n");
			return NULL;
		}

		shinfo = skb_shinfo(nskb);
		frags = shinfo->frags;

		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
		     shinfo->nr_frags++, txp++, gop++) {
			index = pending_index(queue->pending_cons++);
			pending_idx = queue->pending_ring[index];
			xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
			frag_set_pending_idx(&frags[shinfo->nr_frags],
					     pending_idx);
		}

		skb_shinfo(skb)->frag_list = nskb;
	}

	return gop;
}

static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
					   u16 pending_idx,
					   grant_handle_t handle)
{
	if (unlikely(queue->grant_tx_handle[pending_idx] !=
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(queue->vif->dev,
			   "Trying to overwrite active handle! pending_idx: %x\n",
			   pending_idx);
		BUG();
	}
	queue->grant_tx_handle[pending_idx] = handle;
}

static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
					     u16 pending_idx)
{
	if (unlikely(queue->grant_tx_handle[pending_idx] ==
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(queue->vif->dev,
			   "Trying to unmap invalid handle! pending_idx: %x\n",
			   pending_idx);
		BUG();
	}
	queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}

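/* Check the result of the header's grant copy and of each frag's
 * grant map for this skb (and its frag_list, if any), releasing or
 * invalidating pending slots as appropriate.  Returns 0 on success or
 * the first error encountered.
 */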
static int xenvif_tx_check_gop(struct xenvif_queue *queue,
			       struct sk_buff *skb,
			       struct gnttab_map_grant_ref **gopp_map,
			       struct gnttab_copy **gopp_copy)
{
	struct gnttab_map_grant_ref *gop_map = *gopp_map;
	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
	/* This always points to the shinfo of the skb being checked, which
	 * could be either the first or the one on the frag_list
	 */
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	/* If this is non-NULL, we are currently checking the frag_list skb, and
	 * this points to the shinfo of the first one
	 */
	struct skb_shared_info *first_shinfo = NULL;
	int nr_frags = shinfo->nr_frags;
	const bool sharedslot = nr_frags &&
				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
	int i, err;

	/* Check status of header. */
	err = (*gopp_copy)->status;
	if (unlikely(err)) {
		if (net_ratelimit())
			netdev_dbg(queue->vif->dev,
				   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
				   (*gopp_copy)->status,
				   pending_idx,
				   (*gopp_copy)->source.u.ref);
		/* The first frag might still have this slot mapped */
		if (!sharedslot)
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_ERROR);
	}
	(*gopp_copy)++;

check_frags:
	for (i = 0; i < nr_frags; i++, gop_map++) {
		int j, newerr;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

		/* Check error status: if okay then remember grant handle. */
		newerr = gop_map->status;

		if (likely(!newerr)) {
			xenvif_grant_handle_set(queue,
						pending_idx,
						gop_map->handle);
			/* Had a previous error? Invalidate this fragment. */
			if (unlikely(err)) {
				xenvif_idx_unmap(queue, pending_idx);
				/* If the mapping of the first frag was OK, but
				 * the header's copy failed, and they are
				 * sharing a slot, send an error
				 */
				if (i == 0 && sharedslot)
					xenvif_idx_release(queue, pending_idx,
							   XEN_NETIF_RSP_ERROR);
				else
					xenvif_idx_release(queue, pending_idx,
							   XEN_NETIF_RSP_OKAY);
			}
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		if (net_ratelimit())
			netdev_dbg(queue->vif->dev,
				   "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
				   i,
				   gop_map->status,
				   pending_idx,
				   gop_map->ref);

		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;

		/* First error: if the header hasn't shared a slot with the
		 * first frag, release it as well.
		 */
		if (!sharedslot)
			xenvif_idx_release(queue,
					   XENVIF_TX_CB(skb)->pending_idx,
					   XEN_NETIF_RSP_OKAY);

		/* Invalidate preceding fragments of this skb. */
		for (j = 0; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xenvif_idx_unmap(queue, pending_idx);
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		/* And if we found the error while checking the frag_list, unmap
		 * the first skb's frags
		 */
		if (first_shinfo) {
			for (j = 0; j < first_shinfo->nr_frags; j++) {
				pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
				xenvif_idx_unmap(queue, pending_idx);
				xenvif_idx_release(queue, pending_idx,
						   XEN_NETIF_RSP_OKAY);
			}
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	if (skb_has_frag_list(skb) && !first_shinfo) {
		first_shinfo = skb_shinfo(skb);
		shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
		nr_frags = shinfo->nr_frags;

		goto check_frags;
	}

	*gopp_map = gop_map;
	return err;
}

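/* Attach the mapped guest pages to the skb's frags, chaining the
 * zerocopy callback structures together and taking an extra page
 * reference to balance the network stack's eventual put_page().
 */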
static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;
	u16 prev_pending_idx = INVALID_PENDING_IDX;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		struct page *page;
		u16 pending_idx;

		pending_idx = frag_get_pending_idx(frag);

		/* If this is not the first frag, chain it to the previous */
		if (prev_pending_idx == INVALID_PENDING_IDX)
			skb_shinfo(skb)->destructor_arg =
				&callback_param(queue, pending_idx);
		else
			callback_param(queue, prev_pending_idx).ctx =
				&callback_param(queue, pending_idx);

		callback_param(queue, pending_idx).ctx = NULL;
		prev_pending_idx = pending_idx;

		txp = &queue->pending_tx_info[pending_idx].req;
		page = virt_to_page(idx_to_kaddr(queue, pending_idx));
		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset network stack's put_page */
		get_page(queue->mmap_pages[pending_idx]);
	}
}

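/* Consume a chain of XEN_NETIF_EXTRA_FLAG_MORE extra info slots from
 * the Tx ring into 'extras'.  Returns the remaining work_to_do or a
 * negative errno on a malformed request.
 */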
static int xenvif_get_extras(struct xenvif_queue *queue,
				struct xen_netif_extra_info *extras,
				int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = queue->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_err(queue->vif->dev, "Missing extra info\n");
			xenvif_fatal_tx_err(queue->vif);
			return -EBADR;
		}

		memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
		       sizeof(extra));
		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			queue->tx.req_cons = ++cons;
			netdev_err(queue->vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			xenvif_fatal_tx_err(queue->vif);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
		queue->tx.req_cons = ++cons;
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}

static int xenvif_set_skb_gso(struct xenvif *vif,
			      struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		netdev_err(vif->dev, "GSO size must not be zero.\n");
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	switch (gso->u.gso.type) {
	case XEN_NETIF_GSO_TYPE_TCPV4:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
		break;
	case XEN_NETIF_GSO_TYPE_TCPV6:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		break;
	default:
		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	/* gso_segs will be calculated later */

	return 0;
}
1307
static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
1308
{
1309
	bool recalculate_partial_csum = false;
1310 1311 1312 1313 1314 1315 1316

	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1317
		queue->stats.rx_gso_checksum_fixup++;
1318
		skb->ip_summed = CHECKSUM_PARTIAL;
1319
		recalculate_partial_csum = true;
1320 1321 1322 1323 1324 1325
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

1326
	return skb_checksum_setup(skb, recalculate_partial_csum);
1327 1328
}

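/* Credit-based rate limiting: returns true if a packet of 'size'
 * bytes exceeds the remaining credit and must wait for the credit
 * timer to replenish it.
 */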
static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
{
	u64 now = get_jiffies_64();
	u64 next_credit = queue->credit_window_start +
		msecs_to_jiffies(queue->credit_usec / 1000);

	/* Timer could already be pending in rare cases. */
	if (timer_pending(&queue->credit_timeout))
		return true;

	/* Passed the point where we can replenish credit? */
	if (time_after_eq64(now, next_credit)) {
		queue->credit_window_start = now;
		tx_add_credit(queue);
	}

	/* Still too big to send right now? Set a callback. */
	if (size > queue->remaining_credit) {
		queue->credit_timeout.data     =
			(unsigned long)queue;
		queue->credit_timeout.function =
			tx_credit_callback;
		mod_timer(&queue->credit_timeout,
			  next_credit);
		queue->credit_window_start = next_credit;

		return true;
	}

	return false;
}

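/* First half of Tx processing: pull up to 'budget' packets' worth of
 * requests off the shared ring, validate them, and build a grant copy
 * operation for each header plus grant map operations for the frags.
 * The counts of generated operations are returned via *copy_ops and
 * *map_ops.
 */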
static void xenvif_tx_build_gops(struct xenvif_queue *queue,
				     int budget,
				     unsigned *copy_ops,
				     unsigned *map_ops)
{
	struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
	struct sk_buff *skb;
	int ret;

	while (skb_queue_len(&queue->tx_queue) < budget) {
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
		u16 pending_idx;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;
		pending_ring_idx_t index;

		if (queue->tx.sring->req_prod - queue->tx.req_cons >
		    XEN_NETIF_TX_RING_SIZE) {
			netdev_err(queue->vif->dev,
				   "Impossible number of requests. "
				   "req_prod %d, req_cons %d, size %ld\n",
				   queue->tx.sring->req_prod, queue->tx.req_cons,
				   XEN_NETIF_TX_RING_SIZE);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}

		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
		if (!work_to_do)
			break;

		idx = queue->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));

		/* Credit-based scheduling. */
		if (txreq.size > queue->remaining_credit &&
		    tx_credit_exceeded(queue, txreq.size))
			break;

		queue->remaining_credit -= txreq.size;

		work_to_do--;
		queue->tx.req_cons = ++idx;

		memset(extras, 0, sizeof(extras));
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xenvif_get_extras(queue, extras,
						       work_to_do);
			idx = queue->tx.req_cons;
			if (unlikely(work_to_do < 0))
				break;
		}

		ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
		if (unlikely(ret < 0))
			break;

		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(queue->vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			xenvif_tx_err(queue, &txreq, idx);
			break;
		}

		/* No crossing a page as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
			netdev_err(queue->vif->dev,
				   "txreq.offset: %x, size: %u, end: %lu\n",
				   txreq.offset, txreq.size,
				   (txreq.offset&~PAGE_MASK) + txreq.size);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}

		index = pending_index(queue->pending_cons);
		pending_idx = queue->pending_ring[index];

		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
			XEN_NETBACK_TX_COPY_LEN : txreq.size;

		skb = xenvif_alloc_skb(data_len);
		if (unlikely(skb == NULL)) {
			netdev_dbg(queue->vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			xenvif_tx_err(queue, &txreq, idx);
			break;
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
				/* Failure in xenvif_set_skb_gso is fatal. */
				kfree_skb(skb);
				break;
			}
		}

		XENVIF_TX_CB(skb)->pending_idx = pending_idx;

		__skb_put(skb, data_len);
		queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
		queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;

		queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
			virt_to_mfn(skb->data);
		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
		queue->tx_copy_ops[*copy_ops].dest.offset =
			offset_in_page(skb->data);

		queue->tx_copy_ops[*copy_ops].len = data_len;
		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;

		(*copy_ops)++;

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size) {
			skb_shinfo(skb)->nr_frags++;
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     pending_idx);
			xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
			gop++;
		} else {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     INVALID_PENDING_IDX);
			memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
			       sizeof(txreq));
		}

		queue->pending_cons++;

		request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
		if (request_gop == NULL) {
			kfree_skb(skb);
			xenvif_tx_err(queue, &txreq, idx);
			break;
		}
		gop = request_gop;

		__skb_queue_tail(&queue->tx_queue, skb);

		queue->tx.req_cons = idx;

		if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
			break;
	}

	(*map_ops) = gop - queue->tx_map_ops;
	return;
}

/* Consolidate skb with a frag_list into a brand new one with local pages on
 * frags. Returns 0 or -ENOMEM if it can't allocate new pages.
 */
static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned int offset = skb_headlen(skb);
	skb_frag_t frags[MAX_SKB_FRAGS];
	int i;
	struct ubuf_info *uarg;
	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

	queue->stats.tx_zerocopy_sent += 2;
	queue->stats.tx_frag_overflow++;

	xenvif_fill_frags(queue, nskb);
	/* Subtract frags size, we will correct it later */
	skb->truesize -= skb->data_len;
	skb->len += nskb->len;
	skb->data_len += nskb->len;

	/* create a brand new frags array and coalesce there */
	for (i = 0; offset < skb->len; i++) {
		struct page *page;
		unsigned int len;

		BUG_ON(i >= MAX_SKB_FRAGS);
		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			int j;
			skb->truesize += skb->data_len;
			for (j = 0; j < i; j++)
				put_page(frags[j].page.p);
			return -ENOMEM;
		}

		if (offset + PAGE_SIZE < skb->len)
			len = PAGE_SIZE;
		else
			len = skb->len - offset;
		if (skb_copy_bits(skb, offset, page_address(page), len))
			BUG();

		offset += len;
		frags[i].page.p = page;
		frags[i].page_offset = 0;
		skb_frag_size_set(&frags[i], len);
	}
	/* swap out with old one */
	memcpy(skb_shinfo(skb)->frags,
	       frags,
	       i * sizeof(skb_frag_t));
	skb_shinfo(skb)->nr_frags = i;
	skb->truesize += i * PAGE_SIZE;

	/* remove traces of mapped pages and frag_list */
	skb_frag_list_init(skb);
	uarg = skb_shinfo(skb)->destructor_arg;
	/* increase inflight counter to offset decrement in callback */
	atomic_inc(&queue->inflight_packets);
	uarg->callback(uarg, true);
	skb_shinfo(skb)->destructor_arg = NULL;

	xenvif_skb_zerocopy_prepare(queue, nskb);
	kfree_skb(nskb);

	return 0;
}

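/* Second half of Tx processing: for each skb queued by
 * xenvif_tx_build_gops(), check the grant operation results, fix up
 * checksum and GSO state, and hand the packet to the network stack.
 */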
static int xenvif_tx_submit(struct xenvif_queue *queue)
{
	struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
	struct gnttab_copy *gop_copy = queue->tx_copy_ops;
	struct sk_buff *skb;
	int work_done = 0;

	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		u16 pending_idx;
		unsigned data_len;

		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
		txp = &queue->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
			/* If there was an error, xenvif_tx_check_gop is
			 * expected to release all the frags which were mapped,
			 * so kfree_skb shouldn't do it again
			 */
			skb_shinfo(skb)->nr_frags = 0;
			if (skb_has_frag_list(skb)) {
				struct sk_buff *nskb =
						skb_shinfo(skb)->frag_list;
				skb_shinfo(nskb)->nr_frags = 0;
			}
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		callback_param(queue, pending_idx).ctx = NULL;
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xenvif_fill_frags(queue, skb);

		if (unlikely(skb_has_frag_list(skb))) {
			if (xenvif_handle_frag_list(queue, skb)) {
				if (net_ratelimit())
					netdev_err(queue->vif->dev,
						   "Not enough memory to consolidate frag_list!\n");
				xenvif_skb_zerocopy_prepare(queue, skb);
				kfree_skb(skb);
				continue;
			}
		}

		skb->dev      = queue->vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(queue, skb)) {
			netdev_dbg(queue->vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			/* We have to set this flag to trigger the callback */
			if (skb_shinfo(skb)->destructor_arg)
				xenvif_skb_zerocopy_prepare(queue, skb);
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb, 0);

		/* If the packet is GSO then we will have just set up the
		 * transport header offset in checksum_setup so it's now
		 * straightforward to calculate gso_segs.
		 */
		if (skb_is_gso(skb)) {
			int mss = skb_shinfo(skb)->gso_size;
			int hdrlen = skb_transport_header(skb) -
				skb_mac_header(skb) +
				tcp_hdrlen(skb);

			skb_shinfo(skb)->gso_segs =
				DIV_ROUND_UP(skb->len - hdrlen, mss);
		}

		queue->stats.rx_bytes += skb->len;
		queue->stats.rx_packets++;

		work_done++;

		/* Set this flag right before netif_receive_skb, otherwise
		 * someone might think this packet already left netback, and
		 * do a skb_copy_ubufs while we are still in control of the
		 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
		 */
		if (skb_shinfo(skb)->destructor_arg) {
			xenvif_skb_zerocopy_prepare(queue, skb);
			queue->stats.tx_zerocopy_sent++;
		}

		netif_receive_skb(skb);
	}

	return work_done;
}

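/* Called when an skb carrying mapped foreign pages is released: walk
 * the ubuf chain, queue each pending slot on the dealloc ring and
 * wake the dealloc thread.
 */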
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
	unsigned long flags;
	pending_ring_idx_t index;
	struct xenvif_queue *queue = ubuf_to_queue(ubuf);

	/* This is the only place where we grab this lock, to protect callbacks
	 * from each other.
	 */
	spin_lock_irqsave(&queue->callback_lock, flags);
	do {
		u16 pending_idx = ubuf->desc;
		ubuf = (struct ubuf_info *) ubuf->ctx;
		BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
			MAX_PENDING_REQS);
		index = pending_index(queue->dealloc_prod);
		queue->dealloc_ring[index] = pending_idx;
		/* Sync with xenvif_tx_dealloc_action:
		 * insert idx then incr producer.
		 */
		smp_wmb();
		queue->dealloc_prod++;
	} while (ubuf);
	wake_up(&queue->dealloc_wq);
	spin_unlock_irqrestore(&queue->callback_lock, flags);

	if (likely(zerocopy_success))
		queue->stats.tx_zerocopy_success++;
	else
		queue->stats.tx_zerocopy_fail++;
	xenvif_skb_zerocopy_complete(queue);
}

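/* Unmap the grants queued on the dealloc ring by the zerocopy
 * callback and release the corresponding pending slots back with
 * XEN_NETIF_RSP_OKAY.
 */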
static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
	struct gnttab_unmap_grant_ref *gop;
	pending_ring_idx_t dc, dp;
	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
	unsigned int i = 0;

	dc = queue->dealloc_cons;
	gop = queue->tx_unmap_ops;

	/* Free up any grants we have finished using */
	do {
		dp = queue->dealloc_prod;

		/* Ensure we see all indices enqueued by all
		 * xenvif_zerocopy_callback().
		 */
		smp_rmb();

		while (dc != dp) {
			BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
			pending_idx =
				queue->dealloc_ring[pending_index(dc++)];

			pending_idx_release[gop-queue->tx_unmap_ops] =
				pending_idx;
			queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
				queue->mmap_pages[pending_idx];
			gnttab_set_unmap_op(gop,
					    idx_to_kaddr(queue, pending_idx),
					    GNTMAP_host_map,
					    queue->grant_tx_handle[pending_idx]);
			xenvif_grant_handle_reset(queue, pending_idx);
			++gop;
		}

	} while (dp != queue->dealloc_prod);

	queue->dealloc_cons = dc;

	if (gop - queue->tx_unmap_ops > 0) {
		int ret;
		ret = gnttab_unmap_refs(queue->tx_unmap_ops,
					NULL,
					queue->pages_to_unmap,
					gop - queue->tx_unmap_ops);
		if (ret) {
			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
				   gop - queue->tx_unmap_ops, ret);
			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
				if (gop[i].status != GNTST_okay)
					netdev_err(queue->vif->dev,
						   " host_addr: %llx handle: %x status: %d\n",
						   gop[i].host_addr,
						   gop[i].handle,
						   gop[i].status);
			}
			BUG();
		}
	}

	for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
		xenvif_idx_release(queue, pending_idx_release[i],
				   XEN_NETIF_RSP_OKAY);
}


/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
	unsigned nr_mops, nr_cops = 0;
	int work_done, ret;

	if (unlikely(!tx_work_todo(queue)))
		return 0;

	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);

	if (nr_cops == 0)
		return 0;

	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
	if (nr_mops != 0) {
		ret = gnttab_map_refs(queue->tx_map_ops,
				      NULL,
				      queue->pages_to_map,
				      nr_mops);
		BUG_ON(ret);
	}

	work_done = xenvif_tx_submit(queue);

	return work_done;
}

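/* Send a TX response for the given pending slot and return the slot
 * to the pending ring. Serialised against concurrent callers by
 * queue->response_lock.
 */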
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status)
{
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t index;
	unsigned long flags;

	pending_tx_info = &queue->pending_tx_info[pending_idx];
	spin_lock_irqsave(&queue->response_lock, flags);
	make_tx_response(queue, &pending_tx_info->req, status);
	index = pending_index(queue->pending_prod);
	queue->pending_ring[index] = pending_idx;
	/* TX shouldn't use the index before we give it back here */
	mb();
	queue->pending_prod++;
	spin_unlock_irqrestore(&queue->response_lock, flags);
}


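/* Write a TX response (and a NULL extra-info slot if the request
 * carried one) onto the shared ring and notify the frontend if it
 * asked to be notified.
 */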
static void make_tx_response(struct xenvif_queue *queue,
			     struct xen_netif_tx_request *txp,
			     s8       st)
{
	RING_IDX i = queue->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;
	int notify;

	resp = RING_GET_RESPONSE(&queue->tx, i);
	resp->id     = txp->id;
	resp->status = st;

	if (txp->flags & XEN_NETTXF_extra_info)
		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	queue->tx.rsp_prod_pvt = ++i;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);
}

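/* Fill in the next RX response on the shared ring; a negative status
 * overrides the size field. The caller pushes the responses and
 * notifies the frontend.
 */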
static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
					     u16      id,
					     s8       st,
					     u16      offset,
					     u16      size,
					     u16      flags)
{
	RING_IDX i = queue->rx.rsp_prod_pvt;
	struct xen_netif_rx_response *resp;

	resp = RING_GET_RESPONSE(&queue->rx, i);
	resp->offset     = offset;
	resp->flags      = flags;
	resp->id         = id;
	resp->status     = (s16)size;
	if (st < 0)
		resp->status = (s16)st;

	queue->rx.rsp_prod_pvt = ++i;

	return resp;
}

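/* Synchronously unmap the grant backing a single pending TX slot. */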
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
	int ret;
	struct gnttab_unmap_grant_ref tx_unmap_op;

	gnttab_set_unmap_op(&tx_unmap_op,
			    idx_to_kaddr(queue, pending_idx),
			    GNTMAP_host_map,
			    queue->grant_tx_handle[pending_idx]);
	xenvif_grant_handle_reset(queue, pending_idx);

	ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
				&queue->mmap_pages[pending_idx], 1);
	if (ret) {
		netdev_err(queue->vif->dev,
			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
			   ret,
			   pending_idx,
			   tx_unmap_op.host_addr,
			   tx_unmap_op.handle,
			   tx_unmap_op.status);
		BUG();
	}
}

static inline int tx_work_todo(struct xenvif_queue *queue)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
		return 1;

	return 0;
}

static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
{
	return queue->dealloc_cons != queue->dealloc_prod;
}

void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
{
	if (queue->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->tx.sring);
	if (queue->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->rx.sring);
}

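/* Map the frontend's TX and RX shared rings into the backend and
 * initialise the back ring structures; on failure any mapping made
 * so far is torn down.
 */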
int xenvif_map_frontend_rings(struct xenvif_queue *queue,
			      grant_ref_t tx_ring_ref,
			      grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;

	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     tx_ring_ref, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     rx_ring_ref, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&queue->rx, rxs, PAGE_SIZE);

	return 0;

err:
	xenvif_unmap_frontend_rings(queue);
	return err;
}

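/* Mark this queue as stalled; the carrier is turned off when the
 * first queue stalls.
 */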
static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->stalled = true;

	/* At least one queue has stalled? Disable the carrier. */
	spin_lock(&vif->lock);
	if (vif->stalled_queues++ == 0) {
		netdev_info(vif->dev, "Guest Rx stalled");
		netif_carrier_off(vif->dev);
	}
	spin_unlock(&vif->lock);
}

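/* Mark this queue as ready again; the carrier comes back on once no
 * queue is stalled.
 */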
static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
	queue->stalled = false;

	/* All queues are ready? Enable the carrier. */
	spin_lock(&vif->lock);
	if (--vif->stalled_queues == 0) {
		netdev_info(vif->dev, "Guest Rx ready");
		netif_carrier_on(vif->dev);
	}
	spin_unlock(&vif->lock);
}

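/* A queue counts as stalled when the frontend has left it short of
 * Rx slots for longer than the configured stall timeout; it becomes
 * ready again once enough slots are available.
 */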
static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return !queue->stalled
		&& prod - cons < XEN_NETBK_RX_SLOTS_MAX
		&& time_after(jiffies,
			      queue->last_rx_time + queue->vif->stall_timeout);
}

static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return queue->stalled
		&& prod - cons >= XEN_NETBK_RX_SLOTS_MAX;
}

static bool xenvif_have_rx_work(struct xenvif_queue *queue)
{
	return (!skb_queue_empty(&queue->rx_queue)
		&& xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
		|| (queue->vif->stall_timeout &&
		    (xenvif_rx_queue_stalled(queue)
		     || xenvif_rx_queue_ready(queue)))
		|| kthread_should_stop()
		|| queue->vif->disabled;
}

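/* How long the Rx thread may sleep before the skb at the head of the
 * internal queue expires; infinite if the queue is empty.
 */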
static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
	struct sk_buff *skb;
	long timeout;

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = XENVIF_RX_CB(skb)->expires - jiffies;
	return timeout < 0 ? 0 : timeout;
}

/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning).  In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when a skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
	DEFINE_WAIT(wait);

	if (xenvif_have_rx_work(queue))
		return;

	for (;;) {
		long ret;

		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
		if (xenvif_have_rx_work(queue))
			break;
		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
		if (!ret)
			break;
	}
	finish_wait(&queue->wq, &wait);
}

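/* Per-queue guest Rx kthread: delivers queued packets to the
 * frontend, handles stall detection and drops packets that have sat
 * on the internal queue for too long.
 */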
int xenvif_kthread_guest_rx(void *data)
{
	struct xenvif_queue *queue = data;
	struct xenvif *vif = queue->vif;

	if (!vif->stall_timeout)
		xenvif_queue_carrier_on(queue);

	for (;;) {
		xenvif_wait_for_rx_work(queue);

		if (kthread_should_stop())
			break;

		/* This frontend is found to be rogue, disable it in
		 * kthread context. Currently this is only set when
		 * netback finds out frontend sends malformed packet,
		 * but we cannot disable the interface in softirq
		 * context so we defer it here, if this thread is
		 * associated with queue 0.
		 */
		if (unlikely(vif->disabled && queue->id == 0)) {
			xenvif_carrier_off(vif);
			xenvif_rx_queue_purge(queue);
			continue;
		}

		if (!skb_queue_empty(&queue->rx_queue))
			xenvif_rx_action(queue);

		/* If the guest hasn't provided any Rx slots for a
		 * while it's probably not responsive, drop the
		 * carrier so packets are dropped earlier.
		 */
		if (vif->stall_timeout) {
			if (xenvif_rx_queue_stalled(queue))
				xenvif_queue_carrier_off(queue);
			else if (xenvif_rx_queue_ready(queue))
				xenvif_queue_carrier_on(queue);
		}

		/* Queued packets may have foreign pages from other
		 * domains.  These cannot be queued indefinitely as
		 * this would starve guests of grant refs and transmit
		 * slots.
		 */
		xenvif_rx_queue_drop_expired(queue);

		xenvif_rx_queue_maybe_wake(queue);

		cond_resched();
	}

	/* Bin any remaining skbs */
	xenvif_rx_queue_purge(queue);

	return 0;
}

static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
{
	/* Dealloc thread must remain running until all inflight
	 * packets complete.
	 */
	return kthread_should_stop() &&
		!atomic_read(&queue->inflight_packets);
}

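/* Per-queue dealloc kthread: unmaps grants for completed zerocopy
 * packets until the queue is torn down and no packets remain in
 * flight.
 */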
int xenvif_dealloc_kthread(void *data)
{
	struct xenvif_queue *queue = data;

	for (;;) {
		wait_event_interruptible(queue->dealloc_wq,
					 tx_dealloc_work_todo(queue) ||
					 xenvif_dealloc_kthread_should_stop(queue));
		if (xenvif_dealloc_kthread_should_stop(queue))
			break;

		xenvif_tx_dealloc_action(queue);
		cond_resched();
	}

	/* Unmap anything remaining */
	if (tx_dealloc_work_todo(queue))
		xenvif_tx_dealloc_action(queue);

	return 0;
}

static int __init netback_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	/* Allow as many queues as there are CPUs, by default */
	xenvif_max_queues = num_online_cpus();

	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

#ifdef CONFIG_DEBUG_FS
	xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
		pr_warn("Init of debugfs returned %ld!\n",
			PTR_ERR(xen_netback_dbg_root));
#endif /* CONFIG_DEBUG_FS */

	return 0;

failed_init:
	return rc;
}

module_init(netback_init);

static void __exit netback_fini(void)
{
#ifdef CONFIG_DEBUG_FS
	if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
		debugfs_remove_recursive(xen_netback_dbg_root);
#endif /* CONFIG_DEBUG_FS */
	xenvif_xenbus_fini();
}
module_exit(netback_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");