/*
 * linux/net/sunrpc/xprtsock.c
 *
 * Client-side transport implementation for sockets.
 *
 * TCP callback races fixes (C) 1998 Red Hat Software <alan@redhat.com>
 * TCP send fixes (C) 1998 Red Hat Software <alan@redhat.com>
 * TCP NFS related read + write fixes
 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 *
 * Rewrite of large parts of the code in order to stabilize TCP stuff.
 * Fix behaviour when socket buffer is full.
 *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
#include <linux/file.h>

#include <net/sock.h>
#include <net/checksum.h>
#include <net/udp.h>
#include <net/tcp.h>

/*
 * Maximum port number to use when requesting a reserved port.
 */
#define XS_MAX_RESVPORT		(800U)

/*
 * How many times to try sending a request on a socket before waiting
 * for the socket buffer to clear.
 */
#define XS_SENDMSG_RETRY	(10U)

#ifdef RPC_DEBUG
# undef  RPC_DEBUG_DATA
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#ifdef RPC_DEBUG_DATA
static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	u8 *buf = (u8 *) packet;
	int j;

	dprintk("RPC:      %s\n", msg);
	for (j = 0; j < count && j < 128; j += 4) {
		if (!(j & 31)) {
			if (j)
				dprintk("\n");
			dprintk("0x%04x ", j);
		}
		dprintk("%02x%02x%02x%02x ",
			buf[j], buf[j+1], buf[j+2], buf[j+3]);
	}
	dprintk("\n");
}
#else
static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	/* NOP */
}
#endif

#define XS_SENDMSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)

static inline int xs_send_head(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, unsigned int len)
{
	struct kvec iov = {
		.iov_base	= xdr->head[0].iov_base + base,
		.iov_len	= len - base,
	};
	struct msghdr msg = {
		.msg_name	= addr,
		.msg_namelen	= addrlen,
		.msg_flags	= XS_SENDMSG_FLAGS,
	};

	if (xdr->len > len)
		msg.msg_flags |= MSG_MORE;

	if (likely(iov.iov_len))
		return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
	return kernel_sendmsg(sock, &msg, NULL, 0, 0);
}

static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int base, unsigned int len)
{
	struct kvec iov = {
		.iov_base	= xdr->tail[0].iov_base + base,
		.iov_len	= len - base,
	};
	struct msghdr msg = {
		.msg_flags	= XS_SENDMSG_FLAGS,
	};

	return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
}

/**
 * xs_sendpages - write pages directly to a socket
 * @sock: socket to send on
 * @addr: UDP only -- address of destination
 * @addrlen: UDP only -- length of destination address
 * @xdr: buffer containing this request
 * @base: starting position in the buffer
 *
 */
static inline int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
{
	struct page **ppage = xdr->pages;
	unsigned int len, pglen = xdr->page_len;
	int err, ret = 0;
	ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);

	if (unlikely(!sock))
		return -ENOTCONN;

	clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);

	len = xdr->head[0].iov_len;
	if (base < len || (addr != NULL && base == 0)) {
		err = xs_send_head(sock, addr, addrlen, xdr, base, len);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
		if (err != (len - base))
			goto out;
		base = 0;
	} else
		base -= len;

	if (unlikely(pglen == 0))
		goto copy_tail;
	if (unlikely(base >= pglen)) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}

	sendpage = sock->ops->sendpage ? : sock_no_sendpage;
	do {
		int flags = XS_SENDMSG_FLAGS;

		len = PAGE_CACHE_SIZE;
		if (base)
			len -= base;
		if (pglen < len)
			len = pglen;

		if (pglen != len || xdr->tail[0].iov_len != 0)
			flags |= MSG_MORE;

		/* Hmm... We might be dealing with highmem pages */
		if (PageHighMem(*ppage))
			sendpage = sock_no_sendpage;
		err = sendpage(sock, *ppage, base, len, flags);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
		if (err != len)
			goto out;
		base = 0;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len) {
		err = xs_send_tail(sock, xdr, base, len);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
	}
out:
	return ret;
}

/**
 * xs_nospace - place task on wait queue if transmit was incomplete
 * @task: task to put to sleep
 *
 */
static void xs_nospace(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %4d xmit incomplete (%u left of %u)\n",
			task->tk_pid, req->rq_slen - req->rq_bytes_sent,
			req->rq_slen);

	if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) {
		/* Protect against races with write_space */
		spin_lock_bh(&xprt->transport_lock);

		/* Don't race with disconnect */
		if (!xprt_connected(xprt))
			task->tk_status = -ENOTCONN;
		else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags))
			xprt_wait_for_buffer_space(task);

		spin_unlock_bh(&xprt->transport_lock);
	} else
		/* Keep holding the socket if it is blocked */
		rpc_delay(task, HZ>>4);
}

/**
 * xs_udp_send_request - write an RPC request to a UDP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 */
static int xs_udp_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int status;

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);

	req->rq_xtime = jiffies;
	status = xs_sendpages(xprt->sock, (struct sockaddr *) &xprt->addr,
				sizeof(xprt->addr), xdr, req->rq_bytes_sent);

	dprintk("RPC:      xs_udp_send_request(%u) = %d\n",
			xdr->len - req->rq_bytes_sent, status);

	if (likely(status >= (int) req->rq_slen))
		return 0;

	/* Still some bytes left; set up for a retry later. */
	if (status > 0)
		status = -EAGAIN;

	switch (status) {
	case -ENETUNREACH:
	case -EPIPE:
	case -ECONNREFUSED:
		/* When the server has died, an ICMP port unreachable message
		 * prompts ECONNREFUSED. */
		break;
	case -EAGAIN:
		xs_nospace(task);
		break;
	default:
		dprintk("RPC:      sendmsg returned unrecognized error %d\n",
			-status);
		break;
	}

	return status;
}

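/* Fill in the 4-byte record marker that precedes each RPC message on a
 * stream transport: the fragment length with the high bit set to mark
 * the last (and here only) fragment, in network byte order.  A 100-byte
 * RPC message yields the marker 0x80000064.
 */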
static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
{
	u32 reclen = buf->len - sizeof(rpc_fraghdr);
	rpc_fraghdr *base = buf->head[0].iov_base;
	*base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen);
}

/**
 * xs_tcp_send_request - write an RPC request to a TCP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 *
 * XXX: In the case of soft timeouts, should we eventually give up
 *	if sendmsg is not able to make progress?
 */
static int xs_tcp_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int status, retry = 0;

	xs_encode_tcp_record_marker(&req->rq_snd_buf);

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);

	/* Continue transmitting the packet/record. We must be careful
	 * to cope with writespace callbacks arriving _after_ we have
	 * called sendmsg(). */
	while (1) {
		req->rq_xtime = jiffies;
		status = xs_sendpages(xprt->sock, NULL, 0, xdr,
						req->rq_bytes_sent);

		dprintk("RPC:      xs_tcp_send_request(%u) = %d\n",
				xdr->len - req->rq_bytes_sent, status);

		if (unlikely(status < 0))
			break;

		/* If we've sent the entire packet, immediately
		 * reset the count of bytes sent. */
		req->rq_bytes_sent += status;
		if (likely(req->rq_bytes_sent >= req->rq_slen)) {
			req->rq_bytes_sent = 0;
			return 0;
		}

		status = -EAGAIN;
		if (retry++ > XS_SENDMSG_RETRY)
			break;
	}

	switch (status) {
	case -EAGAIN:
		xs_nospace(task);
		break;
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ENOTCONN:
	case -EPIPE:
		status = -ENOTCONN;
		break;
	default:
		dprintk("RPC:      sendmsg returned unrecognized error %d\n",
			-status);
		xprt_disconnect(xprt);
		break;
	}

	return status;
}

/**
 * xs_close - close a socket
 * @xprt: transport
 *
 */
static void xs_close(struct rpc_xprt *xprt)
{
	struct socket *sock = xprt->sock;
	struct sock *sk = xprt->inet;

	if (!sk)
		return;

	dprintk("RPC:      xs_close xprt %p\n", xprt);

	write_lock_bh(&sk->sk_callback_lock);
	xprt->inet = NULL;
	xprt->sock = NULL;

	sk->sk_user_data = NULL;
	sk->sk_data_ready = xprt->old_data_ready;
	sk->sk_state_change = xprt->old_state_change;
	sk->sk_write_space = xprt->old_write_space;
	write_unlock_bh(&sk->sk_callback_lock);

	sk->sk_no_check = 0;

	sock_release(sock);
}

/**
 * xs_destroy - prepare to shutdown a transport
 * @xprt: doomed transport
 *
 */
static void xs_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC:      xs_destroy xprt %p\n", xprt);

	cancel_delayed_work(&xprt->connect_worker);
	flush_scheduled_work();

	xprt_disconnect(xprt);
	xs_close(xprt);
	kfree(xprt->slot);
}

static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
{
	return (struct rpc_xprt *) sk->sk_user_data;
}

/**
 * xs_udp_data_ready - "data ready" callback for UDP sockets
 * @sk: socket with data to read
 * @len: how much data to read
 *
 */
static void xs_udp_data_ready(struct sock *sk, int len)
{
	struct rpc_task *task;
	struct rpc_xprt *xprt;
	struct rpc_rqst *rovr;
	struct sk_buff *skb;
	int err, repsize, copied;
	u32 _xid, *xp;

	read_lock(&sk->sk_callback_lock);
	dprintk("RPC:      xs_udp_data_ready...\n");
	if (!(xprt = xprt_from_sock(sk)))
		goto out;

	if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
		goto out;

	if (xprt->shutdown)
		goto dropit;

	repsize = skb->len - sizeof(struct udphdr);
	if (repsize < 4) {
		dprintk("RPC:      impossible RPC reply size %d!\n", repsize);
		goto dropit;
	}

	/* Copy the XID from the skb... */
	xp = skb_header_pointer(skb, sizeof(struct udphdr),
				sizeof(_xid), &_xid);
	if (xp == NULL)
		goto dropit;

	/* Look up and lock the request corresponding to the given XID */
	spin_lock(&xprt->transport_lock);
	rovr = xprt_lookup_rqst(xprt, *xp);
	if (!rovr)
		goto out_unlock;
	task = rovr->rq_task;

	if ((copied = rovr->rq_private_buf.buflen) > repsize)
		copied = repsize;

	/* Suck it into the iovec, verify checksum if not done by hw. */
	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb))
		goto out_unlock;

	/* Something worked... */
	dst_confirm(skb->dst);

	xprt_adjust_cwnd(task, copied);
	xprt_update_rtt(task);
	xprt_complete_rqst(task, copied);

 out_unlock:
	spin_unlock(&xprt->transport_lock);
 dropit:
	skb_free_datagram(sk, skb);
 out:
	read_unlock(&sk->sk_callback_lock);
}

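/* Copy up to @len bytes from the socket buffer described by @desc into @p
 * and advance the reader; used as the copy actor for
 * xdr_partial_copy_from_skb().
 */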
static inline size_t xs_tcp_copy_data(skb_reader_t *desc, void *p, size_t len)
{
	if (len > desc->count)
		len = desc->count;
	if (skb_copy_bits(desc->skb, desc->offset, p, len)) {
		dprintk("RPC:      failed to copy %zu bytes from skb. %zu bytes remain\n",
				len, desc->count);
		return 0;
	}
	desc->offset += len;
	desc->count -= len;
	dprintk("RPC:      copied %zu bytes from skb. %zu bytes remain\n",
			len, desc->count);
	return len;
}

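/* Assemble the 4-byte record marker, which may itself arrive split across
 * socket buffers, then extract the fragment length and last-fragment flag.
 */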
static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	size_t len, used;
	char *p;

	p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset;
	len = sizeof(xprt->tcp_recm) - xprt->tcp_offset;
	used = xs_tcp_copy_data(desc, p, len);
	xprt->tcp_offset += used;
	if (used != len)
		return;

	xprt->tcp_reclen = ntohl(xprt->tcp_recm);
	if (xprt->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
		xprt->tcp_flags |= XPRT_LAST_FRAG;
	else
		xprt->tcp_flags &= ~XPRT_LAST_FRAG;
	xprt->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;

	xprt->tcp_flags &= ~XPRT_COPY_RECM;
	xprt->tcp_offset = 0;

	/* Sanity check of the record length */
	if (unlikely(xprt->tcp_reclen < 4)) {
		dprintk("RPC:      invalid TCP record fragment length\n");
		xprt_disconnect(xprt);
		return;
	}
	dprintk("RPC:      reading TCP record fragment of length %d\n",
			xprt->tcp_reclen);
}

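/* Once the current fragment has been consumed, arrange to read the next
 * record marker, and, after the last fragment of a reply, the next XID.
 */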
static void xs_tcp_check_recm(struct rpc_xprt *xprt)
{
	dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n",
			xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags);
	if (xprt->tcp_offset == xprt->tcp_reclen) {
		xprt->tcp_flags |= XPRT_COPY_RECM;
		xprt->tcp_offset = 0;
		if (xprt->tcp_flags & XPRT_LAST_FRAG) {
			xprt->tcp_flags &= ~XPRT_COPY_DATA;
			xprt->tcp_flags |= XPRT_COPY_XID;
			xprt->tcp_copied = 0;
		}
	}
}

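/* Read the XID of the next reply; like the record marker, it may arrive
 * split across socket buffers.
 */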
static inline void xs_tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	size_t len, used;
	char *p;

	len = sizeof(xprt->tcp_xid) - xprt->tcp_offset;
	dprintk("RPC:      reading XID (%Zu bytes)\n", len);
	p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset;
	used = xs_tcp_copy_data(desc, p, len);
	xprt->tcp_offset += used;
	if (used != len)
		return;
	xprt->tcp_flags &= ~XPRT_COPY_XID;
	xprt->tcp_flags |= XPRT_COPY_DATA;
	xprt->tcp_copied = 4;
	dprintk("RPC:      reading reply for XID %08x\n",
						ntohl(xprt->tcp_xid));
	xs_tcp_check_recm(xprt);
}

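/* Copy reply data from the current fragment into the receive buffer of
 * the request whose XID we just read.
 */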
static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	struct rpc_rqst *req;
	struct xdr_buf *rcvbuf;
	size_t len;
	ssize_t r;

	/* Find and lock the request corresponding to this xid */
	spin_lock(&xprt->transport_lock);
	req = xprt_lookup_rqst(xprt, xprt->tcp_xid);
	if (!req) {
		xprt->tcp_flags &= ~XPRT_COPY_DATA;
		dprintk("RPC:      XID %08x request not found!\n",
				ntohl(xprt->tcp_xid));
		spin_unlock(&xprt->transport_lock);
		return;
	}

	rcvbuf = &req->rq_private_buf;
	len = desc->count;
	if (len > xprt->tcp_reclen - xprt->tcp_offset) {
		skb_reader_t my_desc;

		len = xprt->tcp_reclen - xprt->tcp_offset;
		memcpy(&my_desc, desc, sizeof(my_desc));
		my_desc.count = len;
		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
					  &my_desc, xs_tcp_copy_data);
		desc->count -= r;
		desc->offset += r;
	} else
		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
					  desc, xs_tcp_copy_data);

	if (r > 0) {
		xprt->tcp_copied += r;
		xprt->tcp_offset += r;
	}
	if (r != len) {
		/* Error when copying to the receive buffer,
		 * usually because we weren't able to allocate
		 * additional buffer pages. All we can do now
		 * is turn off XPRT_COPY_DATA, so the request
		 * will not receive any additional updates,
		 * and time out.
		 * Any remaining data from this record will
		 * be discarded.
		 */
		xprt->tcp_flags &= ~XPRT_COPY_DATA;
		dprintk("RPC:      XID %08x truncated request\n",
				ntohl(xprt->tcp_xid));
		dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
				xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
		goto out;
	}

	dprintk("RPC:      XID %08x read %Zd bytes\n",
			ntohl(xprt->tcp_xid), r);
	dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
			xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);

	if (xprt->tcp_copied == req->rq_private_buf.buflen)
		xprt->tcp_flags &= ~XPRT_COPY_DATA;
	else if (xprt->tcp_offset == xprt->tcp_reclen) {
		if (xprt->tcp_flags & XPRT_LAST_FRAG)
			xprt->tcp_flags &= ~XPRT_COPY_DATA;
	}

out:
	if (!(xprt->tcp_flags & XPRT_COPY_DATA))
		xprt_complete_rqst(req->rq_task, xprt->tcp_copied);
	spin_unlock(&xprt->transport_lock);
	xs_tcp_check_recm(xprt);
}

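/* Skip over fragment data that no request is waiting to receive. */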
static inline void xs_tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	size_t len;

	len = xprt->tcp_reclen - xprt->tcp_offset;
	if (len > desc->count)
		len = desc->count;
	desc->count -= len;
	desc->offset += len;
	xprt->tcp_offset += len;
	dprintk("RPC:      discarded %Zu bytes\n", len);
	xs_tcp_check_recm(xprt);
}

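/* tcp_read_sock() callback: run the record-marking state machine
 * (marker, XID, request data, discard) over the bytes in this skb.
 */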
static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
{
	struct rpc_xprt *xprt = rd_desc->arg.data;
	skb_reader_t desc = {
		.skb	= skb,
		.offset	= offset,
		.count	= len,
		.csum	= 0
	};

	dprintk("RPC:      xs_tcp_data_recv started\n");
	do {
		/* Read in a new fragment marker if necessary */
		/* Can we ever really expect to get completely empty fragments? */
		if (xprt->tcp_flags & XPRT_COPY_RECM) {
			xs_tcp_read_fraghdr(xprt, &desc);
			continue;
		}
		/* Read in the xid if necessary */
		if (xprt->tcp_flags & XPRT_COPY_XID) {
			xs_tcp_read_xid(xprt, &desc);
			continue;
		}
		/* Read in the request data */
		if (xprt->tcp_flags & XPRT_COPY_DATA) {
			xs_tcp_read_request(xprt, &desc);
			continue;
		}
		/* Skip over any trailing bytes on short reads */
		xs_tcp_read_discard(xprt, &desc);
	} while (desc.count);
	dprintk("RPC:      xs_tcp_data_recv done\n");
	return len - desc.count;
}

/**
 * xs_tcp_data_ready - "data ready" callback for TCP sockets
 * @sk: socket with data to read
 * @bytes: how much data to read
 *
 */
static void xs_tcp_data_ready(struct sock *sk, int bytes)
{
	struct rpc_xprt *xprt;
	read_descriptor_t rd_desc;

	read_lock(&sk->sk_callback_lock);
	dprintk("RPC:      xs_tcp_data_ready...\n");
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	if (xprt->shutdown)
		goto out;

	/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
	rd_desc.arg.data = xprt;
	rd_desc.count = 65536;
	tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
out:
	read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_tcp_state_change - callback to handle TCP socket state changes
 * @sk: socket whose state has changed
 *
 */
static void xs_tcp_state_change(struct sock *sk)
{
	struct rpc_xprt *xprt;

	read_lock(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	dprintk("RPC:      xs_tcp_state_change client %p...\n", xprt);
	dprintk("RPC:      state %x conn %d dead %d zapped %d\n",
				sk->sk_state, xprt_connected(xprt),
				sock_flag(sk, SOCK_DEAD),
				sock_flag(sk, SOCK_ZAPPED));

	switch (sk->sk_state) {
	case TCP_ESTABLISHED:
		spin_lock_bh(&xprt->transport_lock);
		if (!xprt_test_and_set_connected(xprt)) {
			/* Reset TCP record info */
			xprt->tcp_offset = 0;
			xprt->tcp_reclen = 0;
			xprt->tcp_copied = 0;
			xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID;
			xprt_wake_pending_tasks(xprt, 0);
		}
		spin_unlock_bh(&xprt->transport_lock);
		break;
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		break;
	default:
		xprt_disconnect(xprt);
		break;
	}
 out:
	read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_udp_write_space - callback invoked when socket buffer space
 *                             becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_udp_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	/* from net/core/sock.c:sock_def_write_space */
	if (sock_writeable(sk)) {
		struct socket *sock;
		struct rpc_xprt *xprt;

		if (unlikely(!(sock = sk->sk_socket)))
			goto out;
		if (unlikely(!(xprt = xprt_from_sock(sk))))
			goto out;
		if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))
			goto out;

		xprt_write_space(xprt);
	}

 out:
	read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_tcp_write_space - callback invoked when socket buffer space
 *                             becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_tcp_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	/* from net/core/stream.c:sk_stream_write_space */
	if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
		struct socket *sock;
		struct rpc_xprt *xprt;

		if (unlikely(!(sock = sk->sk_socket)))
			goto out;
		if (unlikely(!(xprt = xprt_from_sock(sk))))
			goto out;
		if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))
			goto out;

		xprt_write_space(xprt);
	}

 out:
	read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_udp_set_buffer_size - set send and receive limits
 * @xprt: generic transport
 *
 * Set socket send and receive limits based on the
 * sndsize and rcvsize fields in the generic transport
 * structure.
 */
static void xs_udp_set_buffer_size(struct rpc_xprt *xprt)
{
	struct sock *sk = xprt->inet;

	if (xprt->rcvsize) {
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2;
	}
	if (xprt->sndsize) {
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2;
		sk->sk_write_space(sk);
	}
}

/**
 * xs_tcp_set_buffer_size - set send and receive limits
 * @xprt: generic transport
 *
 * Nothing to do for TCP.
 */
static void xs_tcp_set_buffer_size(struct rpc_xprt *xprt)
{
	return;
}

/**
 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
 * @task: task that timed out
 *
 * Adjust the congestion window after a retransmit timeout has occurred.
 */
static void xs_udp_timer(struct rpc_task *task)
{
	xprt_adjust_cwnd(task, -ETIMEDOUT);
}

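/* Bind to a reserved port, scanning downward from the port we used last
 * and wrapping around from 1 back up to XS_MAX_RESVPORT.
 */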
static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
{
	struct sockaddr_in myaddr = {
		.sin_family = AF_INET,
	};
	int err, port;

	/* Were we already bound to a given port? Try to reuse it */
	port = xprt->port;
	do {
		myaddr.sin_port = htons(port);
		err = sock->ops->bind(sock, (struct sockaddr *) &myaddr,
						sizeof(myaddr));
		if (err == 0) {
			xprt->port = port;
			dprintk("RPC:      xs_bindresvport bound to port %u\n",
					port);
			return 0;
		}
		if (--port == 0)
			port = XS_MAX_RESVPORT;
	} while (err == -EADDRINUSE && port != xprt->port);

	dprintk("RPC:      can't bind to reserved port (%d).\n", -err);
	return err;
}

/**
 * xs_udp_connect_worker - set up a UDP socket
 * @args: RPC transport to connect
 *
 * Invoked by a work queue tasklet.
 */
static void xs_udp_connect_worker(void *args)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *) args;
	struct socket *sock = xprt->sock;
	int err, status = -EIO;

	if (xprt->shutdown || xprt->addr.sin_port == 0)
		goto out;

	dprintk("RPC:      xs_udp_connect_worker for xprt %p\n", xprt);

	/* Start by resetting any existing state */
	xs_close(xprt);

	if ((err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock)) < 0) {
		dprintk("RPC:      can't create UDP transport socket (%d).\n", -err);
		goto out;
	}

	if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
		sock_release(sock);
		goto out;
	}

	if (!xprt->inet) {
		struct sock *sk = sock->sk;

		write_lock_bh(&sk->sk_callback_lock);

		sk->sk_user_data = xprt;
		xprt->old_data_ready = sk->sk_data_ready;
		xprt->old_state_change = sk->sk_state_change;
		xprt->old_write_space = sk->sk_write_space;
		sk->sk_data_ready = xs_udp_data_ready;
		sk->sk_write_space = xs_udp_write_space;
		sk->sk_no_check = UDP_CSUM_NORCV;

		xprt_set_connected(xprt);

		/* Reset to new socket */
		xprt->sock = sock;
		xprt->inet = sk;

		write_unlock_bh(&sk->sk_callback_lock);
	}
	xs_udp_set_buffer_size(xprt);
	status = 0;
out:
	xprt_wake_pending_tasks(xprt, status);
	xprt_clear_connecting(xprt);
}

/**
 * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint
 * @args: RPC transport to connect
 *
 * Invoked by a work queue tasklet.
 */
static void xs_tcp_connect_worker(void *args)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)args;
	struct socket *sock = xprt->sock;
	int err, status = -EIO;

	if (xprt->shutdown || xprt->addr.sin_port == 0)
		goto out;

	dprintk("RPC:      xs_tcp_connect_worker for xprt %p\n", xprt);

	/* Start by resetting any existing socket state */
	xs_close(xprt);

	if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) {
		dprintk("RPC:      can't create TCP transport socket (%d).\n", -err);
		goto out;
	}

	if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
		sock_release(sock);
		goto out;
	}

	if (!xprt->inet) {
		struct sock *sk = sock->sk;

		write_lock_bh(&sk->sk_callback_lock);

		sk->sk_user_data = xprt;
		xprt->old_data_ready = sk->sk_data_ready;
		xprt->old_state_change = sk->sk_state_change;
		xprt->old_write_space = sk->sk_write_space;
		sk->sk_data_ready = xs_tcp_data_ready;
		sk->sk_state_change = xs_tcp_state_change;
		sk->sk_write_space = xs_tcp_write_space;
		tcp_sk(sk)->nonagle = 1;

		xprt_clear_connected(xprt);

		/* Reset to new socket */
		xprt->sock = sock;
		xprt->inet = sk;

		write_unlock_bh(&sk->sk_callback_lock);
	}

	/* Tell the socket layer to start connecting... */
	status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr,
			sizeof(xprt->addr), O_NONBLOCK);
	dprintk("RPC: %p  connect status %d connected %d sock state %d\n",
			xprt, -status, xprt_connected(xprt), sock->sk->sk_state);
	if (status < 0) {
		switch (status) {
			case -EINPROGRESS:
			case -EALREADY:
				goto out_clear;
		}
	}
out:
	xprt_wake_pending_tasks(xprt, status);
out_clear:
	xprt_clear_connecting(xprt);
}

/**
 * xs_connect - connect a socket to a remote endpoint
 * @task: address of RPC task that manages state of connect request
 *
 * TCP: If the remote end dropped the connection, delay reconnecting.
 */
static void xs_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	if (xprt_test_and_set_connecting(xprt))
		return;

	if (xprt->sock != NULL) {
		dprintk("RPC:      xs_connect delayed xprt %p\n", xprt);
		schedule_delayed_work(&xprt->connect_worker,
					RPC_REESTABLISH_TIMEOUT);
	} else {
		dprintk("RPC:      xs_connect scheduled xprt %p\n", xprt);
		schedule_work(&xprt->connect_worker);

		/* flush_scheduled_work can sleep... */
		if (!RPC_IS_ASYNC(task))
			flush_scheduled_work();
	}
}

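/* Transport method tables for UDP and TCP sockets, invoked by the
 * generic RPC transport layer.
 */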
static struct rpc_xprt_ops xs_udp_ops = {
	.set_buffer_size	= xs_udp_set_buffer_size,
	.reserve_xprt		= xprt_reserve_xprt_cong,
	.release_xprt		= xprt_release_xprt_cong,
	.connect		= xs_connect,
	.send_request		= xs_udp_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_rtt,
	.timer			= xs_udp_timer,
	.release_request	= xprt_release_rqst_cong,
	.close			= xs_close,
	.destroy		= xs_destroy,
};

static struct rpc_xprt_ops xs_tcp_ops = {
	.set_buffer_size	= xs_tcp_set_buffer_size,
	.reserve_xprt		= xprt_reserve_xprt,
	.release_xprt		= xprt_release_xprt,
	.connect		= xs_connect,
	.send_request		= xs_tcp_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
	.close			= xs_close,
	.destroy		= xs_destroy,
};

extern unsigned int xprt_udp_slot_table_entries;
extern unsigned int xprt_tcp_slot_table_entries;

/**
 * xs_setup_udp - Set up transport to use a UDP socket
 * @xprt: transport to set up
 * @to:   timeout parameters
 *
 */
int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
{
	size_t slot_table_size;

	dprintk("RPC:      setting up udp-ipv4 transport...\n");

	xprt->max_reqs = xprt_udp_slot_table_entries;
	slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
	xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
	if (xprt->slot == NULL)
		return -ENOMEM;
	memset(xprt->slot, 0, slot_table_size);

	xprt->prot = IPPROTO_UDP;
	xprt->port = XS_MAX_RESVPORT;
	xprt->tsh_size = 0;
	xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
	/* XXX: header size can vary due to auth type, IPv6, etc. */
	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);

	INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);

	xprt->ops = &xs_udp_ops;

	if (to)
		xprt->timeout = *to;
	else
		xprt_set_timeout(&xprt->timeout, 5, 5 * HZ);

	return 0;
}

/**
 * xs_setup_tcp - Set up transport to use a TCP socket
 * @xprt: transport to set up
 * @to: timeout parameters
 *
 */
int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
{
	size_t slot_table_size;

	dprintk("RPC:      setting up tcp-ipv4 transport...\n");

	xprt->max_reqs = xprt_tcp_slot_table_entries;
	slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
	xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
	if (xprt->slot == NULL)
		return -ENOMEM;
	memset(xprt->slot, 0, slot_table_size);

	xprt->prot = IPPROTO_TCP;
	xprt->port = XS_MAX_RESVPORT;
	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
	xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;

	INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);

	xprt->ops = &xs_tcp_ops;

	if (to)
		xprt->timeout = *to;
	else
		xprt_set_timeout(&xprt->timeout, 2, 60 * HZ);

	return 0;
}