/*
 * net/sched/sch_netem.c	Network emulator
 *
 * 		This program is free software; you can redistribute it and/or
 * 		modify it under the terms of the GNU General Public License
 * 		as published by the Free Software Foundation; either version
 * 		2 of the License.
 *
 *  		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can be loaded from a table as well to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines.  It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

     Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model, a 4-state Markov model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses sch->q and sch->limit */

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u32 rate;
	s32 packet_overhead;
	u32 cell_size;
	u32 cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum  {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

};
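
/* Note on units: latency and jitter above are scheduler clock ticks
 * (psched_tdiff_t); loss, duplicate, reorder and corrupt are 32-bit
 * fixed-point probabilities (0 = never, ~0 = always, cf. loss_event());
 * rate is in bytes per second, as consumed by packet_len_2_sched_time().
 */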

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
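
/* Note: with rho' = state->rho + 1, get_crandom() above computes
 *	answer = ((2^32 - rho') * U + rho' * last) >> 32
 * i.e. a fixed-point weighted blend of a fresh uniform value U and the
 * previous output.  rho == 0 falls back to plain net_random(), while values
 * of rho close to 2^32 make successive outputs track each other closely.
 */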

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = net_random();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   1 => successfully transmitted packets within a gap period
	 *   4 => isolated losses within a gap period
	 *   3 => lost packets within a burst period
	 *   2 => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case 1:
		if (rnd < clg->a4) {
			clg->state = 4;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1) {
			clg->state = 3;
			return true;
		} else if (clg->a1 < rnd)
			clg->state = 1;

		break;
	case 2:
		if (rnd < clg->a5) {
			clg->state = 3;
			return true;
		} else
			clg->state = 2;

		break;
	case 3:
		if (rnd < clg->a3)
			clg->state = 2;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = 1;
			return true;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = 3;
			return true;
		}
		break;
	case 4:
		clg->state = 1;
		break;
	}

	return false;
}

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases  (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case 1:
		if (net_random() < clg->a1)
			clg->state = 2;
		if (net_random() < clg->a4)
			return true;
		break;
	case 2:
		if (net_random() < clg->a2)
			clg->state = 1;
		if (clg->a3 > net_random())
			return true;
	}

	return false;
}

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		* Extracts a value from the Markov 4 state loss generator,
		* if it is 1 drops a packet and if needed writes the event in
		* the kernel logs
		*/
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		* Extracts a value from the Gilbert-Elliot loss generator,
		* if it is 1 drops a packet and if needed writes the event in
		* the kernel logs
		*/
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
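
/* Worked example (illustrative numbers): with a table entry t equal to
 * NETEM_DIST_SCALE and sigma == 20000 ticks, tabledist() above returns
 * roughly mu + 20000, i.e. one "sigma" above the mean; in general the result
 * is approximately mu + t * sigma / NETEM_DIST_SCALE for a signed entry t.
 */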

static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
{
	u64 ticks;

	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	ticks = (u64)len * NSEC_PER_SEC;

	do_div(ticks, q->rate);
	return PSCHED_NS2TICKS(ticks);
}
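
/* Example (illustrative numbers): with q->rate == 125000 bytes/sec (1 Mbit/s),
 * no overheads and len == 1500, ticks == 1500 * NSEC_PER_SEC / 125000, i.e.
 * 12 ms of transmission time, which PSCHED_NS2TICKS() converts to scheduler
 * ticks.
 */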

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < sch->limit)) {
		skb = skb_peek_tail(list);
		/* Optimize for add at tail */
		if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
			return qdisc_enqueue_tail(nskb, sch);

		skb_queue_reverse_walk(list, skb) {
			if (tnext >= netem_skb_cb(skb)->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);
		sch->qstats.backlog += qdisc_pkt_len(nskb);
		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}
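
/* Note: tfifo_enqueue() above keeps the queue ordered by cb->time_to_send.
 * Since most packets receive a similar base delay, a new skb almost always
 * belongs at the tail, so the reverse walk usually terminates immediately.
 */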

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 * 	NET_XMIT_DROP: queue length didn't change.
 *      NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();

		if (q->rate) {
			struct sk_buff_head *list = &sch->q;

			delay += packet_len_2_sched_time(skb->len, q);

			if (!skb_queue_empty(list)) {
				/*
				 * Last packet in queue is reference point (now).
				 * First packet in queue is already in flight,
				 * calculate this time bonus and subtract it
				 * from delay.
				 */
				delay -= now - netem_skb_cb(skb_peek(list))->time_to_send;
				now = netem_skb_cb(skb_peek_tail(list))->time_to_send;
			}
		}

		cb->time_to_send = now + delay;
		++q->counter;
		ret = tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&sch->q, skb);
		sch->qstats.backlog += qdisc_pkt_len(skb);
		sch->qstats.requeues++;
		ret = NET_XMIT_SUCCESS;
	}

	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			sch->qstats.drops++;
			return ret;
		}
	}

	return NET_XMIT_SUCCESS;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len;

	len = qdisc_queue_drop(sch);
	if (!len && q->qdisc && q->qdisc->ops->drop)
		len = q->qdisc->ops->drop(q->qdisc);
	if (len)
		sch->qstats.drops++;

	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (qdisc_is_throttled(sch))
		return NULL;

tfifo_dequeue:
	skb = qdisc_peek_head(sch);
	if (skb) {
		const struct netem_skb_cb *cb = netem_skb_cb(skb);

		/* is it time to send this packet yet? */
		if (cb->time_to_send <= psched_get_time()) {
			__skb_unlink(skb, &sch->q);
			sch->qstats.backlog -= qdisc_pkt_len(skb);

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif

			if (q->qdisc) {
				int err = qdisc_enqueue(skb, q->qdisc);

				if (unlikely(err != NET_XMIT_SUCCESS)) {
					if (net_xmit_drop_count(err)) {
						sch->qstats.drops++;
						qdisc_tree_decrease_qlen(sch, 1);
					}
				}
				goto tfifo_dequeue;
			}
deliver:
			qdisc_unthrottled(sch);
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	if (d) {
		if (is_vmalloc_addr(d))
			vfree(d);
		else
			kfree(d);
	}
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;
	size_t s;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	s = sizeof(struct disttable) + n * sizeof(s16);
	d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
	if (!d)
		d = vmalloc(s);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}
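
/* The delay distribution table is normally generated off-line (for example by
 * the iproute2 "maketable" utility) from measured delay data; entries are
 * signed values that tabledist() scales by sigma / NETEM_DIST_SCALE.
 */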

static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	q->cell_overhead = r->cell_overhead;
}

static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch(type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = 1;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = 1;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
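
/* Note: for backward compatibility netem's options are laid out as a fixed
 * struct tc_netem_qopt optionally followed by nested attributes such as
 * TCA_NETEM_CORR, TCA_NETEM_DELAY_DIST, TCA_NETEM_REORDER, TCA_NETEM_CORRUPT,
 * TCA_NETEM_RATE and TCA_NETEM_LOSS; parse_attr() skips the fixed-size part
 * before handing the rest to nla_parse().
 */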

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	sch->limit = qopt->limit;

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(sch, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_REORDER])
		get_reorder(sch, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(sch, tb[TCA_NETEM_RATE]);

	q->loss_model = CLG_RANDOM;
	if (tb[TCA_NETEM_LOSS])
		ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		NLA_PUT(skb, NETEM_LOSS_GI, sizeof(gi), &gi);
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		NLA_PUT(skb, NETEM_LOSS_GE, sizeof(ge), &ge);
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	rate.rate = q->rate;
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	NLA_PUT(skb, TCA_NETEM_RATE, sizeof(rate), &rate);

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc) 	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	if (*old) {
		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
		qdisc_reset(*old);
	}
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");