/*
 *  net/dccp/output.c
 * 
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

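/*
 * An ACK-bearing packet has just been sent, so the delayed-ACK timer
 * (if armed) no longer needs to fire.
 */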
static inline void dccp_event_ack_sent(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

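/*
 * Queue an skb at the write head for possible retransmission.  Only
 * packets that carry connection state (REQUEST, CLOSE/CLOSEREQ) go
 * through here, and only one of them may be outstanding at a time,
 * hence the WARN_ON.
 */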
static void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	skb_set_owner_w(skb, sk);
	WARN_ON(sk->sk_send_head);
	sk->sk_send_head = skb;
}

/*
 * All SKBs seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		const struct inet_sock *inet = inet_sk(sk);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		/* XXX For now we're using only 48 bits sequence numbers */
		const u32 dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					  dccp_packet_hdr_len(dcb->dccpd_type);
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;

		dccp_inc_seqno(&dp->dccps_gss);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			/* fall through */
		case DCCP_PKT_DATAACK:
			break;

		case DCCP_PKT_REQUEST:
			set_ack = 0;
			/* fall through */

		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_seq;
			/* fall through */
		default:
			/*
			 * Only data packets should come through with skb->sk
			 * set.
			 */
			WARN_ON(skb->sk);
			skb_set_owner_w(skb, sk);
			break;
		}

		dcb->dccpd_seq = dp->dccps_gss;

		if (dccp_insert_options(sk, skb)) {
			kfree_skb(skb);
			return -EPROTO;
		}

		/* Build DCCP header and checksum it. */
		dh = dccp_zeroed_hdr(skb, dccp_header_size);
		dh->dccph_type	= dcb->dccpd_type;
		dh->dccph_sport	= inet->sport;
		dh->dccph_dport	= inet->dport;
		dh->dccph_doff	= (dccp_header_size + dcb->dccpd_opt_len) / 4;
		dh->dccph_ccval	= dcb->dccpd_ccval;
		/* XXX For now we're using only 48 bits sequence numbers */
		dh->dccph_x	= 1;

		dp->dccps_awh = dp->dccps_gss;
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dp->dccps_service;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

		icsk->icsk_af_ops->send_check(sk, skb->len, skb);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
		err = icsk->icsk_af_ops->queue_xmit(skb, sk, 0);
		if (err <= 0)
			return err;

		/* NET_XMIT_CN is special: it does not guarantee that this
		 * packet was lost.  It indicates that the device is about
		 * to start dropping packets, or is already dropping some
		 * packets of the same priority, and is asking us to send
		 * less aggressively.
		 */
		return err == NET_XMIT_CN ? 0 : err;
	}
	return -ENOBUFS;
}

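/*
 * Recompute the MSS from the path MTU: strip the network-layer header,
 * the 16 bytes of generic DCCP header (12-byte base plus the 4-byte
 * extension for 48-bit sequence numbers), any extension headers, and the
 * rough 40-byte option allowance estimated below.  For example, an IPv4
 * PMTU of 1500 with no extension headers gives 1500 - 20 - 16 - 40 = 1424.
 */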
unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	int mss_now = (pmtu - icsk->icsk_af_ops->net_header_len -
		       sizeof(struct dccp_hdr) - sizeof(struct dccp_hdr_ext));

	/* Now subtract optional transport overhead */
	mss_now -= icsk->icsk_ext_hdr_len;

	/*
	 * FIXME: this should come from the CCID infrastructure, where, say,
	 * TFRC will say it wants TIMESTAMPS, ELAPSED time, etc, for now lets
	 * put a rough estimate for NDP + TIMESTAMP + TIMESTAMP_ECHO + ELAPSED
	 * TIME + TFRC_OPT_LOSS_EVENT_RATE + TFRC_OPT_RECEIVE_RATE + padding to
	 * make it a multiple of 4
	 */

	mss_now -= ((5 + 6 + 10 + 6 + 6 + 6 + 3) / 4) * 4;

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	dp->dccps_mss_cache = mss_now;

	return mss_now;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);

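/*
 * Wake up any processes waiting for buffer space; this is installed as
 * the socket's sk_write_space callback.
 */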
void dccp_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	/* Should agree with poll, otherwise some programs break */
	if (sock_writeable(sk))
		sk_wake_async(sk, 2, POLL_OUT);

	read_unlock(&sk->sk_callback_lock);
}

/**
 * dccp_wait_for_ccid - Wait for the CCID to tell us we can send a packet
 * @sk:    socket to wait for
 * @skb:   the packet we want to send out
 * @timeo: for how long to wait, in jiffies
 */
static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
			      long *timeo)
{
	struct dccp_sock *dp = dccp_sk(sk);
	DEFINE_WAIT(wait);
	long delay;
	int rc;

	while (1) {
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

		if (sk->sk_err)
			goto do_error;
		if (!*timeo)
			goto do_nonblock;
		if (signal_pending(current))
			goto do_interrupted;

		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
					    skb->len);
		if (rc <= 0)
			break;
		delay = msecs_to_jiffies(rc);
		if (delay > *timeo || delay < 0)
			goto do_nonblock;

		sk->sk_write_pending++;
		release_sock(sk);
		*timeo -= schedule_timeout(delay);
		lock_sock(sk);
		sk->sk_write_pending--;
	}
out:
	finish_wait(sk->sk_sleep, &wait);
	return rc;

do_error:
	rc = -EPIPE;
	goto out;
do_nonblock:
	rc = -EAGAIN;
	goto out;
do_interrupted:
	rc = sock_intr_errno(*timeo);
	goto out;
}

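/*
 * Transmit-timer callback: retries dccp_write_xmit() once the delay the
 * CCID asked for has elapsed.  If the socket is currently owned by a
 * user context, back off and re-arm the timer one jiffy later instead.
 */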
static void dccp_write_xmit_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct dccp_sock *dp = dccp_sk(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies+1);
	else
		dccp_write_xmit(sk, 0);
	bh_unlock_sock(sk);
	sock_put(sk);
}

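/*
 * Drain the write queue, asking the CCID before each packet whether it
 * may go out now.  With @block != 0 the caller is willing to sleep in
 * dccp_wait_for_ccid(); otherwise a positive answer from the CCID (a
 * delay in milliseconds) just re-arms dccps_xmit_timer.
 */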
void dccp_write_xmit(struct sock *sk, int block)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	long timeo = DCCP_XMIT_TIMEO; 	/* If a packet is taking longer than
					   this we have other issues */

	while ((skb = skb_peek(&sk->sk_write_queue))) {
		int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
					 skb->len);

		if (err > 0) {
			if (!block) {
				sk_reset_timer(sk, &dp->dccps_xmit_timer,
						msecs_to_jiffies(err)+jiffies);
				break;
			} else {
				err = dccp_wait_for_ccid(sk, skb, &timeo);
				timeo = DCCP_XMIT_TIMEO;
			}
			if (err) {
				printk(KERN_CRIT "%s:err at dccp_wait_for_ccid"
						 " %d\n", __FUNCTION__, err);
				dump_stack();
			}
		}

		skb_dequeue(&sk->sk_write_queue);
		if (err == 0) {
			struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
			const int len = skb->len;

			if (sk->sk_state == DCCP_PARTOPEN) {
				/* See 8.1.5.  Handshake Completion */
				inet_csk_schedule_ack(sk);
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  inet_csk(sk)->icsk_rto,
						  DCCP_RTO_MAX);
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			} else if (dccp_ack_pending(sk))
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			else
				dcb->dccpd_type = DCCP_PKT_DATA;

			err = dccp_transmit_skb(sk, skb);
			ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
			if (err) {
				printk(KERN_CRIT "%s:err from "
					         "ccid_hc_tx_packet_sent %d\n",
					         __FUNCTION__, err);
				dump_stack();
			}
		} else
			kfree_skb(skb);
	}
}

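/*
 * Retransmit an skb without removing it from the write queue: hand
 * dccp_transmit_skb() a private copy, via pskb_copy() when the original
 * is cloned (and its data thus shared), or a cheap skb_clone() otherwise.
 */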
int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
		return -EHOSTUNREACH; /* Routing failure or similar. */

	return dccp_transmit_skb(sk, (skb_cloned(skb) ?
				      pskb_copy(skb, GFP_ATOMIC):
				      skb_clone(skb, GFP_ATOMIC)));
}

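/*
 * Build a RESPONSE to an incoming REQUEST.  The connection is still
 * embryonic, so the sequence numbers and the service code are taken
 * from the request_sock rather than from a full DCCP socket.
 */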
struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
				   struct request_sock *req)
{
	struct dccp_hdr *dh;
	struct dccp_request_sock *dreq;
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_response);
	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	skb->dst = dst_clone(dst);
	skb->csum = 0;

	dreq = dccp_rsk(req);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
	DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_iss;

	if (dccp_insert_options(sk, skb)) {
		kfree_skb(skb);
		return NULL;
	}

	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dh->dccph_sport	= inet_sk(sk)->sport;
	dh->dccph_dport	= inet_rsk(req)->rmt_port;
	dh->dccph_doff	= (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type	= DCCP_PKT_RESPONSE;
	dh->dccph_x	= 1;
	dccp_hdr_set_seq(dh, dreq->dreq_iss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
}

EXPORT_SYMBOL_GPL(dccp_make_response);

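/*
 * Build a RESET packet carrying @code for a connection that has a full
 * socket; the global sequence number is advanced as for any other packet.
 */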
static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
				       const enum dccp_reset_codes code)
{
	struct dccp_hdr *dh;
	struct dccp_sock *dp = dccp_sk(sk);
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_reset);
	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	skb->dst = dst_clone(dst);
	skb->csum = 0;

	dccp_inc_seqno(&dp->dccps_gss);

	DCCP_SKB_CB(skb)->dccpd_reset_code = code;
	DCCP_SKB_CB(skb)->dccpd_type	   = DCCP_PKT_RESET;
	DCCP_SKB_CB(skb)->dccpd_seq	   = dp->dccps_gss;

	if (dccp_insert_options(sk, skb)) {
		kfree_skb(skb);
		return NULL;
	}

	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dh->dccph_sport	= inet_sk(sk)->sport;
	dh->dccph_dport	= inet_sk(sk)->dport;
	dh->dccph_doff	= (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type	= DCCP_PKT_RESET;
	dh->dccph_x	= 1;
	dccp_hdr_set_seq(dh, dp->dccps_gss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);

	dccp_hdr_reset(skb)->dccph_reset_code = code;
	inet_csk(sk)->icsk_af_ops->send_check(sk, skb->len, skb);

	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
}

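/*
 * Send an active RESET: the route is revalidated first, and the packet
 * is transmitted directly rather than through the write queue.
 */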
int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
	/*
	 * FIXME: what if rebuild_header fails?
	 * Should we be doing a rebuild_header here?
	 */
	int err = inet_sk_rebuild_header(sk);

	if (err == 0) {
		struct sk_buff *skb = dccp_make_reset(sk, sk->sk_dst_cache,
						      code);
		if (skb != NULL) {
			memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
			err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, sk, 0);
			if (err == NET_XMIT_CN)
				err = 0;
		}
	}

	return err;
}

/*
 * Do all connect socket setups that can be done AF independent.
 */
static inline void dccp_connect_init(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);

	dccp_sync_mss(sk, dst_mtu(dst));

	dccp_update_gss(sk, dp->dccps_iss);
	/*
	 * SWL and AWL are initially adjusted so that they are not less than
	 * the initial Sequence Numbers received and sent, respectively:
	 *	SWL := max(GSR + 1 - floor(W/4), ISR),
	 *	AWL := max(GSS - W' + 1, ISS).
	 * These adjustments MUST be applied only at the beginning of the
	 * connection.
	 */
	dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss));

	icsk->icsk_retransmits = 0;
	init_timer(&dp->dccps_xmit_timer);
	dp->dccps_xmit_timer.data = (unsigned long)sk;
	dp->dccps_xmit_timer.function = dccp_write_xmit_timer;
}

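/*
 * Actively open a connection: entail and send the initial REQUEST, and
 * arm the retransmission timer to repeat it until an answer arrives.
 */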
int dccp_connect(struct sock *sk)
{
	struct sk_buff *skb;
	struct inet_connection_sock *icsk = inet_csk(sk);

	dccp_connect_init(sk);

	skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
	if (unlikely(skb == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
	skb->csum = 0;

	dccp_skb_entail(sk, skb);
	dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
	DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the REQUEST until an answer. */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  icsk->icsk_rto, DCCP_RTO_MAX);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);

void dccp_send_ack(struct sock *sk)
{
	/* If we have been reset, we may not send again. */
	if (sk->sk_state != DCCP_CLOSED) {
		struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
						GFP_ATOMIC);

		if (skb == NULL) {
			inet_csk_schedule_ack(sk);
			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX,
						  DCCP_RTO_MAX);
			return;
		}

		/* Reserve space for headers */
		skb_reserve(skb, sk->sk_prot->max_header);
		skb->csum = 0;
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
		dccp_transmit_skb(sk, skb);
	}
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

void dccp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	/*
	 * FIXME: tune this timer. elapsed time fixes the skew, so no problem
	 * with using 2s, and active senders also piggyback the ACK into a
	 * DATAACK packet, so this is really for quiescent senders.
	 */
	unsigned long timeout = jiffies + 2 * HZ;

	/* Use the new timeout only if there wasn't an older one earlier. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If delack timer was blocked or is about to expire,
		 * send ACK now.
		 *
		 * FIXME: check the "about to expire" part
		 */
		if (icsk->icsk_ack.blocked) {
			dccp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}

void dccp_send_sync(struct sock *sk, const u64 seq,
		    const enum dccp_pkt_type pkt_type)
{
	/*
	 * We are not putting this on the write queue, so
	 * dccp_transmit_skb() will set the ownership to this
	 * sock.
	 */
	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

	if (skb == NULL)
		/* FIXME: how to make sure the sync is sent? */
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	skb->csum = 0;
	DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
	DCCP_SKB_CB(skb)->dccpd_seq = seq;

	dccp_transmit_skb(sk, skb);
}

EXPORT_SYMBOL_GPL(dccp_send_sync);

/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

	skb = alloc_skb(sk->sk_prot->max_header, prio);
	if (skb == NULL)
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	skb->csum = 0;
	DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ?
					DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;

	if (active) {
		dccp_write_xmit(sk, 1);
		dccp_skb_entail(sk, skb);
		dccp_transmit_skb(sk, skb_clone(skb, prio));
		/* FIXME do we need a retransmit timer here? */
	} else
		dccp_transmit_skb(sk, skb);
}