/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *
 * Any bugs reported to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

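/* Event flow (summary): incoming DATA chunks enter via
 * sctp_ulpq_tail_data(), pass through reassembly (the 'reasm' queue) and
 * stream ordering (the 'lobby' queue) as needed, and are finally handed
 * to the socket by sctp_ulpq_tail_event().
 */
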
/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *);
static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
					      struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode  = 0;
	ulpq->malloced = 0;

	return ulpq;
}


/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

}

/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
	if (ulpq->malloced)
		kfree(ulpq);
}

/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	sctp_data_chunk_t *hdr;
	struct sctp_ulpevent *event;

	hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	/* Do reassembly if needed.  */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed.  */
	if (event && (event->msg_flags & MSG_EOR)) {
		/* Create a temporary list to collect chunks on.  */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * the very first SKB on the 'temp' list.
	 */
	if (event)
		sctp_ulpq_tail_event(ulpq, event);

	return 0;
}

/* Add a new event for propagation to the ULP.  */
/* Clear the partial delivery mode for this socket.   Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			struct list_head *list;
			sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
			list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
			INIT_LIST_HEAD(list);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}

/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

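	/* sp->pd_mode counts how many associations on this socket are
	 * currently in partial delivery; see sctp_clear_pd().
	 */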
	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	sctp_ulpq_reasm_drain(ulpq);
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}

/* If the SKB of 'event' is on a list, it is the first such member
 * of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sk_buff_head *queue, *skb_list;
	struct sk_buff *skb = sctp_event2skb(event);
	int clear_pd = 0;

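	/* If this event's skb was queued on a temporary list by our
	 * caller (as sctp_ulpq_tail_data() does), the first member's
	 * ->prev points back at the sk_buff_head itself; otherwise the
	 * skb is not on a list and ->prev is NULL.
	 */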
	skb_list = (struct sk_buff_head *) skb->prev;

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
		goto out_free;

	/* Check if the user wishes to receive this event.  */
	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_ is
	 * the association that caused the partial delivery.
	 */

	if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else {
		if (ulpq->pd_mode) {
			/* If the association is in partial delivery, we
			 * need to finish delivering the partially processed
			 * packet before passing any other data.  This is
			 * because we don't truly support stream interleaving.
			 */
			if ((event->msg_flags & MSG_NOTIFICATION) ||
			    (SCTP_DATA_NOT_FRAG ==
				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
				queue = &sctp_sk(sk)->pd_lobby;
			else {
				clear_pd = event->msg_flags & MSG_EOR;
				queue = &sk->sk_receive_queue;
			}
		} else {
			/*
			 * If fragment interleave is enabled, we
			 * can queue this to the receive queue instead
			 * of the lobby.
			 */
			if (sctp_sk(sk)->frag_interleave)
				queue = &sk->sk_receive_queue;
			else
				queue = &sctp_sk(sk)->pd_lobby;
		}
	}

	/* If we are harvesting multiple skbs they will be
	 * collected on a list.
	 */
	if (skb_list)
		sctp_skb_list_tail(skb_list, queue);
	else
		__skb_queue_tail(queue, skb);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue)
		sk->sk_data_ready(sk, 0);
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
					 struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list. We store them by TSN.  */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->reasm);

}

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a reassembled skb given the first and last skbs
 * as stored in the reassembly queue. The skbs may be non-linear if the SCTP
 * payload was fragmented on the way and IP had to reassemble them.
 * We add the rest of the skbs to the first skb's frag_list.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue,
							 struct sk_buff *f_frag,
							 struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next);

	/* Add the list of remaining fragments to the first fragments
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue.  */
	__skb_unlink(f_frag, queue);

	/* if we did unshare, then free the old skb and re-assign */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue.  */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment.  */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);

	return event;
}


/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	size_t pd_len = 0;
	struct sctp_association *asoc;
	u32 pd_point;

	/* Initialized to 0 just to avoid compiler warning message.  Will
	 * never be used with this value. It is referenced only after it
	 * is set when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram. Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order. If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 *
	 * There is a potential to do partial delivery if the user sets the
	 * SCTP_PARTIAL_DELIVERY_POINT option.  Let's count some things here
	 * to see if we can do PD.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* If this "FIRST_FRAG" is the first
			 * element in the queue, then count it towards
			 * possible PD.
			 */
			if (pos == ulpq->reasm.next) {
			    pd_first = pos;
			    pd_last = pos;
			    pd_len = pos->len;
			} else {
			    pd_first = NULL;
			    pd_last = NULL;
			    pd_len = 0;
			}

			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if ((first_frag) && (ctsn == next_tsn)) {
				next_tsn++;
				if (pd_first) {
				    pd_last = pos;
				    pd_len += pos->len;
				}
			} else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	asoc = ulpq->asoc;
	if (pd_first) {
		/* Make sure we can enter partial delivery.
		 * We can trigger partial delivery only if fragment
		 * interleave is set, or the socket is not already
		 * in partial delivery.
		 */
		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
			goto done;
		cevent = sctp_skb2event(pd_first);
		pd_point = sctp_sk(asoc->base.sk)->pd_point;
		if (pd_point && pd_point <= pd_len) {
			retval = sctp_make_reassembled_event(&ulpq->reasm,
							     pd_first,
							     pd_last);
			if (retval)
				sctp_ulpq_set_pd(ulpq);
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}

/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn)
				next_tsn++;
			else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
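			/* A first fragment or an unfragmented chunk starts
			 * a new message, so there is no partial
			 * continuation to retrieve here.
			 */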
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}


/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		default:
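			/* Only first and middle fragments can seed
			 * partial delivery of a message.
			 */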
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
	return retval;
}

/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *event;
	__u32 tsn;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		event = sctp_skb2event(pos);
		tsn = event->tsn;

		/* Since the entire message must be abandoned by the
		 * sender (item A3 in Section 3.5, RFC 3758), we can
		 * free all fragments on the list that are less than
		 * or equal to ctsn_point.
		 */
		if (TSN_lte(tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else
			break;
	}
}

/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
	struct sctp_ulpevent *event = NULL;
	struct sk_buff_head temp;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
		/* Do ordering if needed.  */
		if (event && (event->msg_flags & MSG_EOR)) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			event = sctp_ulpq_order(ulpq, event);
		}

		/* Send event to the ULP.  'event' is the
		 * sctp_ulpevent for the very first SKB on the 'temp' list.
		 */
		if (event)
			sctp_ulpq_tail_event(ulpq, event);
	}
}


/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *in;
	__u16 sid, csid;
	__u16 ssn, cssn;

	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

D
	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(in, sid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(event_list, pos);
	}
}

/* Helper function to store chunks needing ordering.  */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
					   struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

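	/* Short circuit: append if the new event sorts after the
	 * current tail of the lobby.
	 */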
	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}


	/* Insert before pos. */
	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby);

}

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *in;

	/* Check if this message needs ordering.  */
	if (SCTP_DATA_UNORDERED & event->msg_flags)
		return event;

	/* Note: The stream ID must be verified before this routine.  */
	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this the expected SSN for this stream ID?  */
	if (ssn != sctp_ssn_peek(in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found.  */
	sctp_ssn_next(in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}

/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *in;
	struct sk_buff_head temp;
	struct sk_buff_head *lobby = &ulpq->lobby;
	__u16 csid, cssn;

	in  = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN.  */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		/* see if this ssn has been marked by skipping */
		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			/* Create a temporary list to collect chunks on.  */
			event = sctp_skb2event(pos);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(&temp, pos);
	}

	/* If we didn't reap any data, see if the next expected SSN
	 * is next on the queue and if so, use that.
	 */
	if (event == NULL && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
			sctp_ssn_next(in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * the very first SKB on the 'temp' list.
	 */
	if (event) {
		/* See if we have more ordered data that we can deliver. */
		sctp_ulpq_retrieve_ordered(ulpq, event);
		sctp_ulpq_tail_event(ulpq, event);
	}
}

/* Skip over an SSN. This is used during the processing of
 * a Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *in;

	/* Note: The stream ID must be verified before this routine.  */
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq, sid);
	return;
}

static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
		struct sk_buff_head *list, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn;
	struct sk_buff *skb;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = __skb_dequeue_tail(list)) != NULL) {
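		/* Renege from the tail of the queue: that data is the
		 * furthest from being deliverable to the user.
		 */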
		freed += skb_headlen(skb);
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		sctp_ulpevent_free(event);
		sctp_tsnmap_renege(tsnmap, tsn);
		if (freed >= needed)
			return freed;
	}

	return freed;
}

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}

/* Partial deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				struct sctp_chunk *chunk,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in Partial Delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;
	/* If the user enabled fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not in partial deliver mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible?  */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP.   */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_ulpq_set_pd(ulpq);
			return;
		}
	}
}

/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;

	if (chunk) {
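		/* Need room for the chunk's payload: its length minus
		 * the DATA chunk header.
		 */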
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	freed = 0;

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed) {
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
		}
	}
	/* If able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
		__u32 tsn;
		tsn = ntohl(chunk->subh.data_hdr->tsn);
		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
		sctp_ulpq_tail_data(ulpq, chunk, gfp);

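		/* See if the incomplete message at the head of the
		 * reassembly queue can now be partially delivered to
		 * relieve pressure on the receive window.
		 */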
		sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
	}

	sk_mem_reclaim(asoc->base.sk);
	return;
}



/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
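	/* Queue a PD-aborted notification if the user subscribed to
	 * partial delivery events.
	 */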
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if (sctp_ulpq_clear_pd(ulpq) || ev)
		sk->sk_data_ready(sk, 0);
}