/*
 *  net/dccp/proto.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License version 2 as
 *	published by the Free Software Foundation.
 */

#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/ioctls.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"

DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

struct percpu_counter dccp_orphan_count;
EXPORT_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo dccp_hashinfo;
EXPORT_SYMBOL_GPL(dccp_hashinfo);

/* the maximum queue length for tx in packets. 0 is no limit */
int sysctl_dccp_tx_qlen __read_mostly = 5;

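/*
 * Move the socket to a new connection state, keeping the DCCP MIB
 * counters in sync and unhashing the socket (releasing its bound port)
 * once it reaches DCCP_CLOSED.
 */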
void dccp_set_state(struct sock *sk, const int state)
{
	const int oldstate = sk->sk_state;

	dccp_pr_debug("%s(%p)  %s  -->  %s\n", dccp_role(sk), sk,
		      dccp_state_name(oldstate), dccp_state_name(state));
	WARN_ON(state == oldstate);

	switch (state) {
	case DCCP_OPEN:
		if (oldstate != DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
		/* Client retransmits all Confirm options until entering OPEN */
		if (oldstate == DCCP_PARTOPEN)
			dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg);
		break;

	case DCCP_CLOSED:
		if (oldstate == DCCP_OPEN || oldstate == DCCP_ACTIVE_CLOSEREQ ||
		    oldstate == DCCP_CLOSING)
			DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash != NULL &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(sk);
		/* fall through */
	default:
		if (oldstate == DCCP_OPEN)
			DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid a closed
	 * socket sitting in the hash tables.
	 */
	sk->sk_state = state;
}

EXPORT_SYMBOL_GPL(dccp_set_state);

static void dccp_finish_passive_close(struct sock *sk)
{
	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
		/* Node (client or server) has received Close packet. */
		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_set_state(sk, DCCP_CLOSED);
		break;
	case DCCP_PASSIVE_CLOSEREQ:
		/*
		 * Client received CloseReq. We set the `active' flag so that
		 * dccp_send_close() retransmits the Close as per RFC 4340, 8.3.
		 */
		dccp_send_close(sk, 1);
		dccp_set_state(sk, DCCP_CLOSING);
	}
}

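/*
 * Final transition to DCCP_CLOSED: stop all transmit timers and either
 * wake up the owning process or, if the socket is already orphaned,
 * destroy it immediately.
 */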
void dccp_done(struct sock *sk)
{
	dccp_set_state(sk, DCCP_CLOSED);
	dccp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);

const char *dccp_packet_name(const int type)
{
	static const char *dccp_packet_names[] = {
		[DCCP_PKT_REQUEST]  = "REQUEST",
		[DCCP_PKT_RESPONSE] = "RESPONSE",
		[DCCP_PKT_DATA]	    = "DATA",
		[DCCP_PKT_ACK]	    = "ACK",
		[DCCP_PKT_DATAACK]  = "DATAACK",
		[DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
		[DCCP_PKT_CLOSE]    = "CLOSE",
		[DCCP_PKT_RESET]    = "RESET",
		[DCCP_PKT_SYNC]	    = "SYNC",
		[DCCP_PKT_SYNCACK]  = "SYNCACK",
	};

	if (type >= DCCP_NR_PKT_TYPES)
		return "INVALID";
	else
		return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);

const char *dccp_state_name(const int state)
{
	static const char *dccp_state_names[] = {
	[DCCP_OPEN]		= "OPEN",
	[DCCP_REQUESTING]	= "REQUESTING",
	[DCCP_PARTOPEN]		= "PARTOPEN",
	[DCCP_LISTEN]		= "LISTEN",
	[DCCP_RESPOND]		= "RESPOND",
	[DCCP_CLOSING]		= "CLOSING",
	[DCCP_ACTIVE_CLOSEREQ]	= "CLOSEREQ",
	[DCCP_PASSIVE_CLOSE]	= "PASSIVE_CLOSE",
	[DCCP_PASSIVE_CLOSEREQ]	= "PASSIVE_CLOSEREQ",
	[DCCP_TIME_WAIT]	= "TIME_WAIT",
	[DCCP_CLOSED]		= "CLOSED",
	};

	if (state >= DCCP_MAX_STATES)
		return "INVALID STATE!";
	else
		return dccp_state_names[state];
}

EXPORT_SYMBOL_GPL(dccp_state_name);

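/*
 * Initialise the DCCP-specific state of a newly created socket;
 * ctl_sock_initialized is false only for the DCCP control socket,
 * which skips feature negotiation.
 */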
int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	dccp_minisock_init(&dp->dccps_minisock);

	icsk->icsk_rto		= DCCP_TIMEOUT_INIT;
	icsk->icsk_syn_retries	= sysctl_dccp_request_retries;
	sk->sk_state		= DCCP_CLOSED;
	sk->sk_write_space	= dccp_write_space;
	icsk->icsk_sync_mss	= dccp_sync_mss;
	dp->dccps_mss_cache	= 536;
	dp->dccps_rate_last	= jiffies;
	dp->dccps_role		= DCCP_ROLE_UNDEFINED;
	dp->dccps_service	= DCCP_SERVICE_CODE_IS_ABSENT;
	dp->dccps_l_ack_ratio	= dp->dccps_r_ack_ratio = 1;

	dccp_init_xmit_timers(sk);

	INIT_LIST_HEAD(&dp->dccps_featneg);
	/* control socket doesn't need feat nego */
	if (likely(ctl_sock_initialized))
		return dccp_feat_init(sk);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);

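/*
 * Release all per-socket DCCP state: any pending retransmit skb, the
 * bound port, the service list, the Ack Vector, both CCIDs and the
 * remaining feature-negotiation entries.
 */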
void dccp_destroy_sock(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_minisock *dmsk = dccp_msk(sk);

	/*
	 * DCCP doesn't use sk_write_queue, just sk_send_head
	 * for retransmissions
	 */
	if (sk->sk_send_head != NULL) {
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	/* Clean up a referenced DCCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash != NULL)
		inet_put_port(sk);

	kfree(dp->dccps_service_list);
	dp->dccps_service_list = NULL;

	if (dmsk->dccpms_send_ack_vector) {
		dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
		dp->dccps_hc_rx_ackvec = NULL;
	}
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
	dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;

	/* clean up feature negotiation state */
	dccp_feat_list_purge(&dp->dccps_featneg);
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);

static inline int dccp_listen_start(struct sock *sk, int backlog)
{
	struct dccp_sock *dp = dccp_sk(sk);

	dp->dccps_role = DCCP_ROLE_LISTEN;
	/* do not start to listen if feature negotiation setup fails */
	if (dccp_feat_finalise_settings(dp))
		return -EPROTO;
	return inet_csk_listen_start(sk, backlog);
}

static inline int dccp_need_reset(int state)
{
	return state != DCCP_CLOSED && state != DCCP_LISTEN &&
	       state != DCCP_REQUESTING;
}

int dccp_disconnect(struct sock *sk, int flags)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = 0;
	const int old_state = sk->sk_state;

	if (old_state != DCCP_CLOSED)
		dccp_set_state(sk, DCCP_CLOSED);

	/*
	 * This corresponds to the ABORT function of RFC793, sec. 3.8
	 * TCP uses a RST segment, DCCP a Reset packet with Code 2, "Aborted".
	 */
	if (old_state == DCCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (dccp_need_reset(old_state)) {
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		sk->sk_err = ECONNRESET;
	} else if (old_state == DCCP_REQUESTING)
		sk->sk_err = ECONNRESET;

	dccp_clear_xmit_timers(sk);

	__skb_queue_purge(&sk->sk_receive_queue);
	__skb_queue_purge(&sk->sk_write_queue);
	if (sk->sk_send_head != NULL) {
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	inet->dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);

	icsk->icsk_backoff = 0;
	inet_csk_delack_init(sk);
	__sk_dst_reset(sk);

	WARN_ON(inet->num && !icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);

/*
 *	Wait for a DCCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int dccp_poll(struct file *file, struct socket *sock,
		       poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;

	poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == DCCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by the poll logic, and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
		if (atomic_read(&sk->sk_rmem_alloc) > 0)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}
	}
	return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);

int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int rc = -ENOTCONN;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN)
		goto out;

	switch (cmd) {
	case SIOCINQ: {
		struct sk_buff *skb;
		unsigned long amount = 0;

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount of this packet since
			 * that is all that will be read.
			 */
			amount = skb->len;
		}
		rc = put_user(amount, (int __user *)arg);
	}
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
out:
	release_sock(sk);
	return rc;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);

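/*
 * Set the socket's 32-bit Service Code (RFC 4340, 8.1.2). If more than
 * one word is passed in, the remaining words are stored as the list of
 * additional service codes associated with this socket.
 */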
static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
				   char __user *optval, int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_service_list *sl = NULL;

	if (service == DCCP_SERVICE_INVALID_VALUE ||
	    optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
		return -EINVAL;

	if (optlen > sizeof(service)) {
		sl = kmalloc(optlen, GFP_KERNEL);
		if (sl == NULL)
			return -ENOMEM;

		sl->dccpsl_nr = optlen / sizeof(u32) - 1;
		if (copy_from_user(sl->dccpsl_list,
				   optval + sizeof(service),
				   optlen - sizeof(service)) ||
		    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
			kfree(sl);
			return -EFAULT;
		}
	}

	lock_sock(sk);
	dp->dccps_service = service;

	kfree(dp->dccps_service_list);

	dp->dccps_service_list = sl;
	release_sock(sk);
	return 0;
}

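/*
 * Adjust partial checksum coverage for the sender (rx == false) or the
 * receiver (rx == true) by registering a Minimum Checksum Coverage
 * preference list for negotiation with the peer.
 */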
static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
{
	u8 *list, len;
	int i, rc;

	if (cscov < 0 || cscov > 15)
		return -EINVAL;
	/*
	 * Populate a list of permissible values, in the range cscov...15. This
	 * is necessary since feature negotiation of single values only works if
	 * both sides incidentally choose the same value. Since the list starts
	 * lowest-value first, negotiation will pick the smallest shared value.
	 */
	if (cscov == 0)
		return 0;
	len = 16 - cscov;

	list = kmalloc(len, GFP_KERNEL);
	if (list == NULL)
		return -ENOBUFS;

	for (i = 0; i < len; i++)
		list[i] = cscov++;

	rc = dccp_feat_register_sp(sk, DCCPF_MIN_CSUM_COVER, rx, list, len);

	if (rc == 0) {
		if (rx)
			dccp_sk(sk)->dccps_pcrlen = cscov;
		else
			dccp_sk(sk)->dccps_pcslen = cscov;
	}
	kfree(list);
	return rc;
}

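/*
 * Register a preference list of CCIDs for the TX and/or RX half-connection.
 * The CCID actually used is decided later, during feature negotiation with
 * the peer.
 */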
static int dccp_setsockopt_ccid(struct sock *sk, int type,
				char __user *optval, int optlen)
{
	u8 *val;
	int rc = 0;

	if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS)
		return -EINVAL;

	val = kmalloc(optlen, GFP_KERNEL);
	if (val == NULL)
		return -ENOMEM;

	if (copy_from_user(val, optval, optlen)) {
		kfree(val);
		return -EFAULT;
	}

	lock_sock(sk);
	if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID)
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 1, val, optlen);

	if (!rc && (type == DCCP_SOCKOPT_RX_CCID || type == DCCP_SOCKOPT_CCID))
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 0, val, optlen);
	release_sock(sk);

	kfree(val);
	return rc;
}

static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
		char __user *optval, int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	int val, err = 0;

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CHANGE_L:
	case DCCP_SOCKOPT_CHANGE_R:
		DCCP_WARN("sockopt(CHANGE_L/R) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CCID:
	case DCCP_SOCKOPT_RX_CCID:
	case DCCP_SOCKOPT_TX_CCID:
		return dccp_setsockopt_ccid(sk, optname, optval, optlen);
	}

	if (optlen < (int)sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	if (optname == DCCP_SOCKOPT_SERVICE)
		return dccp_setsockopt_service(sk, val, optval, optlen);

	lock_sock(sk);
	switch (optname) {
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		if (dp->dccps_role != DCCP_ROLE_SERVER)
			err = -EOPNOTSUPP;
		else
			dp->dccps_server_timewait = (val != 0);
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, false);
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, true);
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return err;
}

int dccp_setsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);
#endif

static int dccp_getsockopt_service(struct sock *sk, int len,
				   __be32 __user *optval,
				   int __user *optlen)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const struct dccp_service_list *sl;
	int err = -ENOENT, slen = 0, total_len = sizeof(u32);

	lock_sock(sk);
	if ((sl = dp->dccps_service_list) != NULL) {
		slen = sl->dccpsl_nr * sizeof(u32);
		total_len += slen;
	}

	err = -EINVAL;
	if (total_len > len)
		goto out;

	err = 0;
	if (put_user(total_len, optlen) ||
	    put_user(dp->dccps_service, optval) ||
	    (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
		err = -EFAULT;
out:
	release_sock(sk);
	return err;
}

static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct dccp_sock *dp;
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < (int)sizeof(int))
		return -EINVAL;

	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_SERVICE:
		return dccp_getsockopt_service(sk, len,
					       (__be32 __user *)optval, optlen);
	case DCCP_SOCKOPT_GET_CUR_MPS:
		val = dp->dccps_mss_cache;
		break;
	case DCCP_SOCKOPT_AVAILABLE_CCIDS:
		return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
	case DCCP_SOCKOPT_TX_CCID:
		val = ccid_get_current_tx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_RX_CCID:
		val = ccid_get_current_rx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		val = dp->dccps_server_timewait;
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		val = dp->dccps_pcslen;
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		val = dp->dccps_pcrlen;
		break;
	case 128 ... 191:
		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	case 192 ... 255:
		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	default:
		return -ENOPROTOOPT;
	}

	len = sizeof(val);
	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

int dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
#endif

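/*
 * Queue a single datagram for transmission. DCCP preserves message
 * boundaries, so anything larger than the current maximum packet size
 * is rejected with -EMSGSIZE rather than fragmented.
 */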
int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const int flags = msg->msg_flags;
	const int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int rc, size;
	long timeo;

	if (len > dp->dccps_mss_cache)
		return -EMSGSIZE;

	lock_sock(sk);

	if (sysctl_dccp_tx_qlen &&
	    (sk->sk_write_queue.qlen >= sysctl_dccp_tx_qlen)) {
		rc = -EAGAIN;
		goto out_release;
	}

	timeo = sock_sndtimeo(sk, noblock);

	/*
	 * We have to use sk_stream_wait_connect here to set sk_write_pending,
	 * so that the trick in dccp_rcv_request_sent_state_process() works.
	 */
	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
		if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_release;

	size = sk->sk_prot->max_header + len;
	release_sock(sk);
	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	lock_sock(sk);
	if (skb == NULL)
		goto out_release;

	skb_reserve(skb, sk->sk_prot->max_header);
	rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (rc != 0)
		goto out_discard;

	skb_queue_tail(&sk->sk_write_queue, skb);
	dccp_write_xmit(sk, 0);
out_release:
	release_sock(sk);
	return rc ? : len;
out_discard:
	kfree_skb(skb);
	goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);

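/*
 * Receive a single datagram. Data/DataAck packets are copied to the
 * caller; Close, CloseReq and Reset packets terminate the loop and are
 * reported as end-of-file (a return value of 0).
 */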
int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len, int nonblock, int flags, int *addr_len)
{
	const struct dccp_hdr *dh;
	long timeo;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN) {
		len = -ENOTCONN;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, nonblock);

	do {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		if (skb == NULL)
			goto verify_sock_status;

		dh = dccp_hdr(skb);

		switch (dh->dccph_type) {
		case DCCP_PKT_DATA:
		case DCCP_PKT_DATAACK:
			goto found_ok_skb;

		case DCCP_PKT_CLOSE:
		case DCCP_PKT_CLOSEREQ:
			if (!(flags & MSG_PEEK))
				dccp_finish_passive_close(sk);
			/* fall through */
		case DCCP_PKT_RESET:
			dccp_pr_debug("found fin (%s) ok!\n",
				      dccp_packet_name(dh->dccph_type));
			len = 0;
			goto found_fin_ok;
		default:
			dccp_pr_debug("packet_type=%s\n",
				      dccp_packet_name(dh->dccph_type));
			sk_eat_skb(sk, skb, 0);
		}
verify_sock_status:
		if (sock_flag(sk, SOCK_DONE)) {
			len = 0;
			break;
		}

		if (sk->sk_err) {
			len = sock_error(sk);
			break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			len = 0;
			break;
		}

		if (sk->sk_state == DCCP_CLOSED) {
			if (!sock_flag(sk, SOCK_DONE)) {
				/* This occurs when the user tries to read
				 * from a socket that was never connected.
				 */
				len = -ENOTCONN;
				break;
			}
			len = 0;
			break;
		}

		if (!timeo) {
			len = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			len = sock_intr_errno(timeo);
			break;
		}

		sk_wait_data(sk, &timeo);
		continue;
	found_ok_skb:
		if (len > skb->len)
			len = skb->len;
		else if (len < skb->len)
			msg->msg_flags |= MSG_TRUNC;

		if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
			/* Exception. Bailout! */
			len = -EFAULT;
			break;
		}
	found_fin_ok:
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb, 0);
		break;
	} while (1);
out:
	release_sock(sk);
	return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);

int inet_dccp_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
		goto out;

	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != DCCP_LISTEN) {
		/*
		 * FIXME: here it probably should be sk->sk_prot->listen_start
		 * see tcp_listen_start
		 */
		err = dccp_listen_start(sk, backlog);
		if (err)
			goto out;
	}
	sk->sk_max_ack_backlog = backlog;
	err = 0;

out:
	release_sock(sk);
	return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);

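/*
 * Start the closing handshake appropriate to the current state: finish a
 * passive close if one is pending, otherwise send a Close/CloseReq and
 * move to CLOSING or ACTIVE_CLOSEREQ.
 */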
static void dccp_terminate_connection(struct sock *sk)
{
	u8 next_state = DCCP_CLOSED;

	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
	case DCCP_PASSIVE_CLOSEREQ:
		dccp_finish_passive_close(sk);
		break;
	case DCCP_PARTOPEN:
		dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk);
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		/* fall through */
	case DCCP_OPEN:
		dccp_send_close(sk, 1);

		if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER &&
		    !dccp_sk(sk)->dccps_server_timewait)
			next_state = DCCP_ACTIVE_CLOSEREQ;
		else
			next_state = DCCP_CLOSING;
		/* fall through */
	default:
		dccp_set_state(sk, next_state);
	}
}

void dccp_close(struct sock *sk, long timeout)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	u32 data_was_unread = 0;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	sk_stop_timer(sk, &dp->dccps_xmit_timer);

	/*
	 * We need to flush the receive buffers. We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		data_was_unread += skb->len;
		__kfree_skb(skb);
	}

	if (data_was_unread) {
		/* Unread data was tossed, send an appropriate Reset Code */
		DCCP_WARN("DCCP: ABORT -- %u bytes unread\n", data_was_unread);
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		dccp_set_state(sk, DCCP_CLOSED);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (sk->sk_state != DCCP_CLOSED) {
		dccp_terminate_connection(sk);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);
	percpu_counter_inc(sk->sk_prot->orphan_count);

	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	WARN_ON(sock_owned_by_user(sk));

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);

void dccp_shutdown(struct sock *sk, int how)
{
	dccp_pr_debug("called shutdown(%x)\n", how);
}

EXPORT_SYMBOL_GPL(dccp_shutdown);

static inline int dccp_mib_init(void)
{
	return snmp_mib_init((void**)dccp_statistics, sizeof(struct dccp_mib));
}

static inline void dccp_mib_exit(void)
{
	snmp_mib_free((void**)dccp_statistics);
}

static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
int dccp_debug;
module_param(dccp_debug, bool, 0644);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif

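/*
 * Module initialisation: set up the orphan counter, the bind-bucket cache,
 * the established/bind hash tables, MIB statistics, Ack Vector cache and
 * sysctl entries, unwinding each step on failure.
 */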
static int __init dccp_init(void)
{
	unsigned long goal;
	int ehash_order, bhash_order, i;
	int rc;

	BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
		     FIELD_SIZEOF(struct sk_buff, cb));
	rc = percpu_counter_init(&dccp_orphan_count, 0);
	if (rc)
		goto out;
	rc = -ENOBUFS;
	inet_hashinfo_init(&dccp_hashinfo);
	dccp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("dccp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!dccp_hashinfo.bind_bucket_cachep)
		goto out_free_percpu;

	/*
	 * Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (num_physpages >= (128 * 1024))
		goal = num_physpages >> (21 - PAGE_SHIFT);
	else
		goal = num_physpages >> (23 - PAGE_SHIFT);

	if (thash_entries)
		goal = (thash_entries *
			sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
	for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
		;
	do {
		dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE /
					sizeof(struct inet_ehash_bucket);
		while (dccp_hashinfo.ehash_size &
		       (dccp_hashinfo.ehash_size - 1))
			dccp_hashinfo.ehash_size--;
		dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC, ehash_order);
	} while (!dccp_hashinfo.ehash && --ehash_order > 0);

	if (!dccp_hashinfo.ehash) {
		DCCP_CRIT("Failed to allocate DCCP established hash table");
		goto out_free_bind_bucket_cachep;
	}

	for (i = 0; i < dccp_hashinfo.ehash_size; i++) {
		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);
		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i);
	}

	if (inet_ehash_locks_alloc(&dccp_hashinfo))
		goto out_free_dccp_ehash;

	bhash_order = ehash_order;

	do {
		dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
					sizeof(struct inet_bind_hashbucket);
		if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
		    bhash_order > 0)
			continue;
		dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC, bhash_order);
	} while (!dccp_hashinfo.bhash && --bhash_order >= 0);

	if (!dccp_hashinfo.bhash) {
		DCCP_CRIT("Failed to allocate DCCP bind hash table");
		goto out_free_dccp_locks;
	}

	for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
		spin_lock_init(&dccp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
	}

	rc = dccp_mib_init();
	if (rc)
		goto out_free_dccp_bhash;

	rc = dccp_ackvec_init();
	if (rc)
		goto out_free_dccp_mib;

	rc = dccp_sysctl_init();
	if (rc)
		goto out_ackvec_exit;

	dccp_timestamping_init();
out:
	return rc;
out_ackvec_exit:
	dccp_ackvec_exit();
out_free_dccp_mib:
	dccp_mib_exit();
out_free_dccp_bhash:
	free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
	dccp_hashinfo.bhash = NULL;
out_free_dccp_locks:
	inet_ehash_locks_free(&dccp_hashinfo);
out_free_dccp_ehash:
	free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
	dccp_hashinfo.ehash = NULL;
out_free_bind_bucket_cachep:
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_hashinfo.bind_bucket_cachep = NULL;
out_free_percpu:
	percpu_counter_destroy(&dccp_orphan_count);
	goto out;
}

static void __exit dccp_fini(void)
{
	dccp_mib_exit();
	free_pages((unsigned long)dccp_hashinfo.bhash,
		   get_order(dccp_hashinfo.bhash_size *
			     sizeof(struct inet_bind_hashbucket)));
	free_pages((unsigned long)dccp_hashinfo.ehash,
		   get_order(dccp_hashinfo.ehash_size *
			     sizeof(struct inet_ehash_bucket)));
	inet_ehash_locks_free(&dccp_hashinfo);
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_ackvec_exit();
	dccp_sysctl_exit();
}

module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");