/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
					      struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode  = 0;

	return ulpq;
}


/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

}

/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
}

/* Process an incoming DATA chunk.  */
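/* Returns 1 if a complete message (MSG_EOR) was delivered to the ULP,
 * 0 if the data was consumed but no complete message could be delivered
 * yet, or -ENOMEM if no event could be allocated.
 */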
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	struct sctp_ulpevent *event;
	int event_eor = 0;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	/* Do reassembly if needed.  */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed.  */
	if (event && (event->msg_flags & MSG_EOR)) {
		/* Create a temporary list to collect chunks on.  */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * the very first SKB on the 'temp' list.
	 */
	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_ulpq_tail_event(ulpq, event);
	}

	return event_eor;
}

/* Clear the partial delivery mode for this socket.  Note: This
 * assumes that no association is currently in partial delivery mode.
 */
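/* Returns 1 if the entire lobby was flushed to the receive queue, so
 * the caller knows to wake the socket; 0 otherwise.
 */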
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			struct list_head *list;
			sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
			list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
			INIT_LIST_HEAD(list);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}

/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	sctp_ulpq_reasm_drain(ulpq);
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}

/* Add a new event for propagation to the ULP.  If the SKB of 'event' is
 * on a list, it is the first such member of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sk_buff_head *queue, *skb_list;
	struct sk_buff *skb = sctp_event2skb(event);
	int clear_pd = 0;

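	/* If the event was gathered on a temporary list, the first skb's
	 * ->prev points back at the sk_buff_head itself, so this cast
	 * recovers the whole list; a lone skb has a NULL ->prev.
	 */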
	skb_list = (struct sk_buff_head *) skb->prev;

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}
	/* Check if the user wishes to receive this event.  */
	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_
	 * association is the cause of the partial delivery.
	 */

	if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else {
		if (ulpq->pd_mode) {
			/* If the association is in partial delivery, we
			 * need to finish delivering the partially processed
			 * packet before passing any other data.  This is
			 * because we don't truly support stream interleaving.
			 */
			if ((event->msg_flags & MSG_NOTIFICATION) ||
			    (SCTP_DATA_NOT_FRAG ==
				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
				queue = &sctp_sk(sk)->pd_lobby;
			else {
				clear_pd = event->msg_flags & MSG_EOR;
				queue = &sk->sk_receive_queue;
			}
		} else {
			/*
			 * If fragment interleave is enabled, we
			 * can queue this to the receive queue instead
			 * of the lobby.
			 */
			if (sctp_sk(sk)->frag_interleave)
				queue = &sk->sk_receive_queue;
			else
				queue = &sctp_sk(sk)->pd_lobby;
		}
	}

	/* If we are harvesting multiple skbs they will be
	 * collected on a list.
	 */
	if (skb_list)
		sctp_skb_list_tail(skb_list, queue);
	else
		__skb_queue_tail(queue, skb);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue)
		sk->sk_data_ready(sk);
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);
	return 0;
}

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
					 struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list. We store them by TSN.  */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));

}

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a reassembled skb given the first and last skbs
 * as stored in the reassembly queue.  The skbs may be non-linear if the
 * SCTP payload was fragmented on the way and IP had to reassemble them.
 * We add the rest of the skbs to the first skb's frag_list.
 */
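/* For example, fragments F1 (first), F2 and F3 (last), held in TSN order
 * in the queue, become one event: F1 with skb_shinfo(F1)->frag_list
 * chained as F2 -> F3, and F1's len/data_len grown to cover all three.
 */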
static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
	struct sk_buff_head *queue, struct sk_buff *f_frag,
	struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next)
		;

	/* Add the list of remaining fragments to the first fragments
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue.  */
	__skb_unlink(f_frag, queue);

	/* if we did unshare, then free the old skb and re-assign */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {

		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue.  */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment.  */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);

	return event;
}


/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
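/* A fully reassembled message is returned with MSG_EOR set; an event
 * created for partial delivery (pd_point reached) is returned without
 * MSG_EOR, since more fragments are still expected.
 */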
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	size_t pd_len = 0;
	struct sctp_association *asoc;
	u32 pd_point;

	/* Initialized to 0 just to avoid compiler warning message.  Will
	 * never be used with this value. It is referenced only after it
	 * is set when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram. Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order. If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 *
	 * There is a potential to do partial delivery if the user sets the
	 * SCTP_PARTIAL_DELIVERY_POINT option. Let's count some things here
	 * to see if we can do PD.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* If this "FIRST_FRAG" is the first
			 * element in the queue, then count it towards
			 * possible PD.
			 */
			if (pos == ulpq->reasm.next) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			} else {
				pd_first = NULL;
				pd_last = NULL;
				pd_len = 0;
			}

			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && (ctsn == next_tsn)) {
				next_tsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	asoc = ulpq->asoc;
	if (pd_first) {
		/* Make sure we can enter partial delivery.
		 * We can trigger partial delivery only if fragment
		 * interleave is set, or the socket is not already
		 * in partial delivery.
		 */
		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
			goto done;

		cevent = sctp_skb2event(pd_first);
		pd_point = sctp_sk(asoc->base.sk)->pd_point;
		if (pd_point && pd_point <= pd_len) {
			retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
							     &ulpq->reasm,
							     pd_first,
							     pd_last);
			if (retval)
				sctp_ulpq_set_pd(ulpq);
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}

/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
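			/* A new first fragment here means the rest of the
			 * partially delivered message is not at the head of
			 * the queue: either nothing was gathered (give up)
			 * or deliver the middle fragments found so far.
			 */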
			if (!first_frag)
				return NULL;
			goto done;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					&ulpq->reasm, first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}


/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery.  */
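/* The returned event never has MSG_EOR set: by definition it is only the
 * leading part of a message whose remaining fragments are still expected.
 */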
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				return NULL;
			else
				goto done;
			break;

		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					&ulpq->reasm, first_frag, last_frag);
	return retval;
}

/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *event;
	__u32 tsn;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		event = sctp_skb2event(pos);
		tsn = event->tsn;

		/* Since the entire message must be abandoned by the
		 * sender (item A3 in Section 3.5, RFC 3758), we can
		 * free all fragments on the list that are less than
		 * or equal to the ctsn_point.
		 */
		if (TSN_lte(tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else
			break;
	}
}

/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
	struct sctp_ulpevent *event = NULL;
	struct sk_buff_head temp;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
		/* Do ordering if needed.  */
		if (event && (event->msg_flags & MSG_EOR)) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			event = sctp_ulpq_order(ulpq, event);
		}

		/* Send event to the ULP.  'event' is the
		 * sctp_ulpevent for the very first SKB on the 'temp' list.
		 */
		if (event)
			sctp_ulpq_tail_event(ulpq, event);
	}
}


/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *in;
	__u16 sid, csid, cssn;

	sid = event->stream;
	in  = &ulpq->asoc->ssnmap->in;

	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN.  */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(in, sid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(event_list, pos);
	}
}

/* Helper function to store chunks needing ordering.  */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
					   struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;
	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}


	/* Insert before pos. */
	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *in;

	/* Check if this message needs ordering.  */
	if (SCTP_DATA_UNORDERED & event->msg_flags)
		return event;

	/* Note: The stream ID must be verified before this routine.  */
	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this the expected SSN for this stream ID?  */
	if (ssn != sctp_ssn_peek(in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found.  */
	sctp_ssn_next(in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}

/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *in;
	struct sk_buff_head temp;
	struct sk_buff_head *lobby = &ulpq->lobby;
	__u16 csid, cssn;

	in  = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN.  */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		/* see if this ssn has been marked by skipping */
		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			/* Create a temporary list to collect chunks on.  */
			event = sctp_skb2event(pos);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(&temp, pos);
	}

	/* If we didn't reap any data, see if the next expected SSN
	 * is next on the queue and if so, use that.
	 */
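	/* sctp_skb_for_each() leaves 'pos' pointing at the list head
	 * sentinel when it runs to completion, so the check below also
	 * ensures 'pos' is a real skb left over from an early break.
	 */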
	if (event == NULL && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
			sctp_ssn_next(in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * the very first SKB on the 'temp' list.
	 */
	if (event) {
		/* see if we have more ordered that we can deliver */
		sctp_ulpq_retrieve_ordered(ulpq, event);
		sctp_ulpq_tail_event(ulpq, event);
	}
}

/* Skip over an SSN. This is used during the processing of
 * a FORWARD TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *in;

	/* Note: The stream ID must be verified before this routine.  */
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq, sid);
}

static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
		struct sk_buff_head *list, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn, last_tsn;
	struct sk_buff *skb, *flist, *last;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = skb_peek_tail(list)) != NULL) {
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		/* Don't renege below the Cumulative TSN ACK Point. */
		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
			break;

		/* Events in ordering queue may have multiple fragments
		 * corresponding to additional TSNs.  Sum the total
		 * freed space; find the last TSN.
		 */
		freed += skb_headlen(skb);
		flist = skb_shinfo(skb)->frag_list;
		for (last = flist; flist; flist = flist->next) {
			last = flist;
			freed += skb_headlen(last);
		}
		if (last)
			last_tsn = sctp_skb2event(last)->tsn;
		else
			last_tsn = tsn;

		/* Unlink the event, then renege all applicable TSNs. */
		__skb_unlink(skb, list);
		sctp_ulpevent_free(event);
		while (TSN_lte(tsn, last_tsn)) {
			sctp_tsnmap_renege(tsnmap, tsn);
			tsn++;
		}
		if (freed >= needed)
			return freed;
	}

	return freed;
}

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}

/* Partial deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;
	__u32 ctsn;
	struct sk_buff *skb;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in Partial Delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* Data must be at or below the Cumulative TSN ACK Point to
	 * start partial delivery.
	 */
	skb = skb_peek(&asoc->ulpq.reasm);
	if (skb != NULL) {
		ctsn = sctp_skb2event(skb)->tsn;
		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
			return;
	}

	/* If the user enabled fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not in partial deliver mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible?  */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP.   */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_ulpq_set_pd(ulpq);
			return;
		}
	}
}

/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;

	if (chunk) {
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	freed = 0;

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed) {
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
		}
	}
	/* If able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
		int retval;
		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
		/*
		 * Enter partial delivery if chunk has not been
		 * delivered; otherwise, drain the reassembly queue.
		 */
		if (retval <= 0)
			sctp_ulpq_partial_delivery(ulpq, gfp);
		else if (retval == 1)
			sctp_ulpq_reasm_drain(ulpq);
	}

	sk_mem_reclaim(asoc->base.sk);
}



/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if (sctp_ulpq_clear_pd(ulpq) || ev)
		sk->sk_data_ready(sk);
}