/*
 * linux/net/sunrpc/xprtsock.c
 *
 * Client-side transport implementation for sockets.
 *
 * TCP callback races fixes (C) 1998 Red Hat Software <alan@redhat.com>
 * TCP send fixes (C) 1998 Red Hat Software <alan@redhat.com>
 * TCP NFS related read + write fixes
 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 *
 * Rewrite of large parts of the code in order to stabilize TCP stuff.
 * Fix behaviour when socket buffer is full.
 *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
#include <linux/file.h>

#include <net/sock.h>
#include <net/checksum.h>
#include <net/udp.h>
#include <net/tcp.h>

/*
 * Maximum port number to use when requesting a reserved port.
 */
#define XS_MAX_RESVPORT		(800U)

#ifdef RPC_DEBUG
# undef  RPC_DEBUG_DATA
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#ifdef RPC_DEBUG_DATA
static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	u8 *buf = (u8 *) packet;
	int j;

	dprintk("RPC:      %s\n", msg);
	for (j = 0; j < count && j < 128; j += 4) {
		if (!(j & 31)) {
			if (j)
				dprintk("\n");
			dprintk("0x%04x ", j);
		}
		dprintk("%02x%02x%02x%02x ",
			buf[j], buf[j+1], buf[j+2], buf[j+3]);
	}
	dprintk("\n");
}
#else
static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	/* NOP */
}
#endif

#define XS_SENDMSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)
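/* MSG_DONTWAIT: never block in the socket layer; MSG_NOSIGNAL: a dead
 * peer yields an EPIPE return instead of a SIGPIPE. */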

static inline int xs_send_head(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, unsigned int len)
{
	struct kvec iov = {
		.iov_base	= xdr->head[0].iov_base + base,
		.iov_len	= len - base,
	};
	struct msghdr msg = {
		.msg_name	= addr,
		.msg_namelen	= addrlen,
		.msg_flags	= XS_SENDMSG_FLAGS,
	};

	if (xdr->len > len)
		msg.msg_flags |= MSG_MORE;

	if (likely(iov.iov_len))
		return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
	return kernel_sendmsg(sock, &msg, NULL, 0, 0);
}

static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int base, unsigned int len)
{
	struct kvec iov = {
		.iov_base	= xdr->tail[0].iov_base + base,
		.iov_len	= len - base,
	};
	struct msghdr msg = {
		.msg_flags	= XS_SENDMSG_FLAGS,
	};

	return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
}

/**
 * xs_sendpages - write pages directly to a socket
 * @sock: socket to send on
 * @addr: UDP only -- address of destination
 * @addrlen: UDP only -- length of destination address
 * @xdr: buffer containing this request
 * @base: starting position in the buffer
 *
 */
static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
{
	struct page **ppage = xdr->pages;
	unsigned int len, pglen = xdr->page_len;
	int err, ret = 0;
	ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);

	len = xdr->head[0].iov_len;
	if (base < len || (addr != NULL && base == 0)) {
		err = xs_send_head(sock, addr, addrlen, xdr, base, len);
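		/* Accumulate the result: take err verbatim while nothing has
		 * been sent yet; once bytes are out, add only positive byte
		 * counts so an error cannot clobber the partial total. */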
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
		if (err != (len - base))
			goto out;
		base = 0;
	} else
		base -= len;

	if (unlikely(pglen == 0))
		goto copy_tail;
	if (unlikely(base >= pglen)) {
		base -= pglen;
		goto copy_tail;
	}
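	/* Convert what is left of the offset into a starting page and an
	 * offset within that page */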
	if (base || xdr->page_base) {
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}

	sendpage = sock->ops->sendpage ? : sock_no_sendpage;
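	/* sock_no_sendpage() is the generic fallback that performs the
	 * transfer with an ordinary in-kernel sendmsg */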
	do {
		int flags = XS_SENDMSG_FLAGS;

		len = PAGE_CACHE_SIZE;
		if (base)
			len -= base;
		if (pglen < len)
			len = pglen;

		if (pglen != len || xdr->tail[0].iov_len != 0)
			flags |= MSG_MORE;

		/* Hmm... We might be dealing with highmem pages */
		if (PageHighMem(*ppage))
			sendpage = sock_no_sendpage;
		err = sendpage(sock, *ppage, base, len, flags);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
		if (err != len)
			goto out;
		base = 0;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len) {
		err = xs_send_tail(sock, xdr, base, len);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
	}
out:
	return ret;
}

/**
 * xs_sendmsg - write an RPC request to a socket
 * @xprt: generic transport
 * @req: the RPC request to write
 *
 */
static int xs_sendmsg(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	struct socket *sock = xprt->sock;
	struct xdr_buf *xdr = &req->rq_snd_buf;
	struct sockaddr *addr = NULL;
	int addrlen = 0;
	unsigned int skip;
	int result;

	if (!sock)
		return -ENOTCONN;

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);

	/* For UDP, we need to provide an address */
	if (!xprt->stream) {
		addr = (struct sockaddr *) &xprt->addr;
		addrlen = sizeof(xprt->addr);
	}
	/* Don't repeat bytes */
	skip = req->rq_bytes_sent;

	clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
	result = xs_sendpages(sock, addr, addrlen, xdr, skip);

	dprintk("RPC:      xs_sendmsg(%d) = %d\n", xdr->len - skip, result);

	if (result >= 0)
		return result;

	switch (result) {
	case -ECONNREFUSED:
		/* When the server has died, an ICMP port unreachable message
		 * prompts ECONNREFUSED. */
	case -EAGAIN:
		break;
	case -ECONNRESET:
	case -ENOTCONN:
	case -EPIPE:
		/* connection broken */
		if (xprt->stream)
			result = -ENOTCONN;
		break;
	default:
		break;
	}
	return result;
}

/**
 * xs_send_request - write an RPC request to a socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *      0:  The request has been sent
 * EAGAIN:  The socket was blocked, please call again later to
 *          complete the request
 *  other:  Some other error occurred, the request was not sent
 *
 * XXX: In the case of soft timeouts, should we eventually give up
 *      if the socket is not able to make progress?
 */
static int xs_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status, retry = 0;

	/* set up everything as needed. */
	/* Write the record marker */
	if (xprt->stream) {
		u32 *marker = req->rq_svec[0].iov_base;
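		/* RPC over TCP prefixes each message with a 4-byte record
		 * marker: the top bit flags the last fragment, the lower 31
		 * bits carry the fragment length (RFC 1831, section 10) */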

		*marker = htonl(0x80000000|(req->rq_slen-sizeof(*marker)));
	}

	/* Continue transmitting the packet/record. We must be careful
	 * to cope with writespace callbacks arriving _after_ we have
	 * called sendmsg().
	 */
	while (1) {
		req->rq_xtime = jiffies;
		status = xs_sendmsg(xprt, req);

		if (status < 0)
			break;

		if (xprt->stream) {
			req->rq_bytes_sent += status;

			/* If we've sent the entire packet, immediately
			 * reset the count of bytes sent. */
			if (req->rq_bytes_sent >= req->rq_slen) {
				req->rq_bytes_sent = 0;
				return 0;
			}
		} else {
			if (status >= req->rq_slen)
				return 0;
			status = -EAGAIN;
			break;
		}

		dprintk("RPC: %4d xmit incomplete (%d left of %d)\n",
				task->tk_pid, req->rq_slen - req->rq_bytes_sent,
				req->rq_slen);

		status = -EAGAIN;
		if (retry++ > 50)
			break;
	}

	if (status == -EAGAIN) {
		if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) {
			/* Protect against races with xs_write_space */
			spin_lock_bh(&xprt->sock_lock);
			/* Don't race with disconnect */
			if (!xprt_connected(xprt))
				task->tk_status = -ENOTCONN;
			else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags)) {
				task->tk_timeout = req->rq_timeout;
				rpc_sleep_on(&xprt->pending, task, NULL, NULL);
			}
			spin_unlock_bh(&xprt->sock_lock);
			return status;
		}
		/* Keep holding the socket if it is blocked */
		rpc_delay(task, HZ>>4);
	}
	return status;
}

/**
 * xs_close - close a socket
 * @xprt: transport
 *
 */
static void xs_close(struct rpc_xprt *xprt)
{
	struct socket *sock = xprt->sock;
	struct sock *sk = xprt->inet;

	if (!sk)
		return;

	dprintk("RPC:      xs_close xprt %p\n", xprt);

	write_lock_bh(&sk->sk_callback_lock);
	xprt->inet = NULL;
	xprt->sock = NULL;

	sk->sk_user_data = NULL;
	sk->sk_data_ready = xprt->old_data_ready;
	sk->sk_state_change = xprt->old_state_change;
	sk->sk_write_space = xprt->old_write_space;
	write_unlock_bh(&sk->sk_callback_lock);

	sk->sk_no_check = 0;

	sock_release(sock);
}

/**
 * xs_destroy - prepare to shutdown a transport
 * @xprt: doomed transport
 *
 */
static void xs_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC:      xs_destroy xprt %p\n", xprt);

	cancel_delayed_work(&xprt->sock_connect);
	flush_scheduled_work();

	xprt_disconnect(xprt);
	xs_close(xprt);
	kfree(xprt->slot);
}

static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
{
	return (struct rpc_xprt *) sk->sk_user_data;
}

/**
 * xs_udp_data_ready - "data ready" callback for UDP sockets
 * @sk: socket with data to read
 * @len: how much data to read
 *
 */
static void xs_udp_data_ready(struct sock *sk, int len)
{
	struct rpc_task *task;
	struct rpc_xprt *xprt;
	struct rpc_rqst *rovr;
	struct sk_buff *skb;
	int err, repsize, copied;
	u32 _xid, *xp;

	read_lock(&sk->sk_callback_lock);
	dprintk("RPC:      xs_udp_data_ready...\n");
	if (!(xprt = xprt_from_sock(sk)))
		goto out;

	if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
		goto out;

	if (xprt->shutdown)
		goto dropit;

	repsize = skb->len - sizeof(struct udphdr);
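	/* a well-formed reply carries at least the 4-byte XID */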
	if (repsize < 4) {
		dprintk("RPC:      impossible RPC reply size %d!\n", repsize);
		goto dropit;
	}

	/* Copy the XID from the skb... */
	xp = skb_header_pointer(skb, sizeof(struct udphdr),
				sizeof(_xid), &_xid);
	if (xp == NULL)
		goto dropit;

	/* Look up and lock the request corresponding to the given XID */
	spin_lock(&xprt->sock_lock);
	rovr = xprt_lookup_rqst(xprt, *xp);
	if (!rovr)
		goto out_unlock;
	task = rovr->rq_task;

	dprintk("RPC: %4d received reply\n", task->tk_pid);

	if ((copied = rovr->rq_private_buf.buflen) > repsize)
		copied = repsize;

	/* Suck it into the iovec, verify checksum if not done by hw. */
	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb))
		goto out_unlock;

	/* Something worked... */
	dst_confirm(skb->dst);

	xprt_complete_rqst(xprt, rovr, copied);

 out_unlock:
	spin_unlock(&xprt->sock_lock);
 dropit:
	skb_free_datagram(sk, skb);
 out:
	read_unlock(&sk->sk_callback_lock);
}

static inline size_t xs_tcp_copy_data(skb_reader_t *desc, void *p, size_t len)
{
	if (len > desc->count)
		len = desc->count;
	if (skb_copy_bits(desc->skb, desc->offset, p, len)) {
		dprintk("RPC:      failed to copy %zu bytes from skb. %zu bytes remain\n",
				len, desc->count);
		return 0;
	}
	desc->offset += len;
	desc->count -= len;
	dprintk("RPC:      copied %zu bytes from skb. %zu bytes remain\n",
			len, desc->count);
	return len;
}

static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	size_t len, used;
	char *p;

	p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset;
	len = sizeof(xprt->tcp_recm) - xprt->tcp_offset;
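	/* The record marker may itself arrive split across several skbs;
	 * keep accumulating into tcp_recm until all four bytes are in */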
	used = xs_tcp_copy_data(desc, p, len);
	xprt->tcp_offset += used;
	if (used != len)
		return;
	xprt->tcp_reclen = ntohl(xprt->tcp_recm);
	if (xprt->tcp_reclen & 0x80000000)
		xprt->tcp_flags |= XPRT_LAST_FRAG;
	else
		xprt->tcp_flags &= ~XPRT_LAST_FRAG;
	xprt->tcp_reclen &= 0x7fffffff;
	xprt->tcp_flags &= ~XPRT_COPY_RECM;
	xprt->tcp_offset = 0;
	/* Sanity check of the record length */
	if (xprt->tcp_reclen < 4) {
		dprintk("RPC:      invalid TCP record fragment length\n");
		xprt_disconnect(xprt);
		return;
	}
	dprintk("RPC:      reading TCP record fragment of length %d\n",
			xprt->tcp_reclen);
}

static void xs_tcp_check_recm(struct rpc_xprt *xprt)
{
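	/* Once tcp_offset catches up with tcp_reclen the current fragment
	 * is fully consumed: expect the next fragment marker and, after
	 * the last fragment of a record, the next reply's XID */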
	dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n",
			xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags);
	if (xprt->tcp_offset == xprt->tcp_reclen) {
		xprt->tcp_flags |= XPRT_COPY_RECM;
		xprt->tcp_offset = 0;
		if (xprt->tcp_flags & XPRT_LAST_FRAG) {
			xprt->tcp_flags &= ~XPRT_COPY_DATA;
			xprt->tcp_flags |= XPRT_COPY_XID;
			xprt->tcp_copied = 0;
		}
	}
}

static inline void xs_tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	size_t len, used;
	char *p;

	len = sizeof(xprt->tcp_xid) - xprt->tcp_offset;
	dprintk("RPC:      reading XID (%Zu bytes)\n", len);
	p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset;
	used = xs_tcp_copy_data(desc, p, len);
	xprt->tcp_offset += used;
	if (used != len)
		return;
	xprt->tcp_flags &= ~XPRT_COPY_XID;
	xprt->tcp_flags |= XPRT_COPY_DATA;
	xprt->tcp_copied = 4;
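	/* the XID accounts for the first four bytes copied from this record */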
	dprintk("RPC:      reading reply for XID %08x\n",
						ntohl(xprt->tcp_xid));
	xs_tcp_check_recm(xprt);
}

static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	struct rpc_rqst *req;
	struct xdr_buf *rcvbuf;
	size_t len;
	ssize_t r;

	/* Find and lock the request corresponding to this xid */
	spin_lock(&xprt->sock_lock);
	req = xprt_lookup_rqst(xprt, xprt->tcp_xid);
	if (!req) {
		xprt->tcp_flags &= ~XPRT_COPY_DATA;
		dprintk("RPC:      XID %08x request not found!\n",
				ntohl(xprt->tcp_xid));
		spin_unlock(&xprt->sock_lock);
		return;
	}

	rcvbuf = &req->rq_private_buf;
	len = desc->count;
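	/* Clamp the copy to the current record boundary; the skb may
	 * already contain the start of the next record */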
	if (len > xprt->tcp_reclen - xprt->tcp_offset) {
		skb_reader_t my_desc;

		len = xprt->tcp_reclen - xprt->tcp_offset;
		memcpy(&my_desc, desc, sizeof(my_desc));
		my_desc.count = len;
		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
					  &my_desc, xs_tcp_copy_data);
		desc->count -= r;
		desc->offset += r;
	} else
		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
					  desc, xs_tcp_copy_data);

	if (r > 0) {
		xprt->tcp_copied += r;
		xprt->tcp_offset += r;
	}
	if (r != len) {
		/* Error when copying to the receive buffer,
		 * usually because we weren't able to allocate
		 * additional buffer pages. All we can do now
		 * is turn off XPRT_COPY_DATA, so the request
		 * will not receive any additional updates,
		 * and time out.
		 * Any remaining data from this record will
		 * be discarded.
		 */
		xprt->tcp_flags &= ~XPRT_COPY_DATA;
		dprintk("RPC:      XID %08x truncated request\n",
				ntohl(xprt->tcp_xid));
		dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
				xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
		goto out;
	}

	dprintk("RPC:      XID %08x read %Zd bytes\n",
			ntohl(xprt->tcp_xid), r);
	dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
			xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);

	if (xprt->tcp_copied == req->rq_private_buf.buflen)
		xprt->tcp_flags &= ~XPRT_COPY_DATA;
	else if (xprt->tcp_offset == xprt->tcp_reclen) {
		if (xprt->tcp_flags & XPRT_LAST_FRAG)
			xprt->tcp_flags &= ~XPRT_COPY_DATA;
	}

out:
	if (!(xprt->tcp_flags & XPRT_COPY_DATA)) {
		dprintk("RPC: %4d received reply complete\n",
				req->rq_task->tk_pid);
		xprt_complete_rqst(xprt, req, xprt->tcp_copied);
	}
	spin_unlock(&xprt->sock_lock);
	xs_tcp_check_recm(xprt);
}

static inline void xs_tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	size_t len;

	len = xprt->tcp_reclen - xprt->tcp_offset;
	if (len > desc->count)
		len = desc->count;
	desc->count -= len;
	desc->offset += len;
	xprt->tcp_offset += len;
	dprintk("RPC:      discarded %Zu bytes\n", len);
	xs_tcp_check_recm(xprt);
}

static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
{
	struct rpc_xprt *xprt = rd_desc->arg.data;
	skb_reader_t desc = {
		.skb	= skb,
		.offset	= offset,
		.count	= len,
		.csum	= 0
	};

	dprintk("RPC:      xs_tcp_data_recv started\n");
	do {
		/* Read in a new fragment marker if necessary */
		/* Can we ever really expect to get completely empty fragments? */
		if (xprt->tcp_flags & XPRT_COPY_RECM) {
			xs_tcp_read_fraghdr(xprt, &desc);
			continue;
		}
		/* Read in the xid if necessary */
		if (xprt->tcp_flags & XPRT_COPY_XID) {
			xs_tcp_read_xid(xprt, &desc);
			continue;
		}
		/* Read in the request data */
		if (xprt->tcp_flags & XPRT_COPY_DATA) {
			xs_tcp_read_request(xprt, &desc);
			continue;
		}
		/* Skip over any trailing bytes on short reads */
		xs_tcp_read_discard(xprt, &desc);
	} while (desc.count);
	dprintk("RPC:      xs_tcp_data_recv done\n");
	return len - desc.count;
}

/**
 * xs_tcp_data_ready - "data ready" callback for TCP sockets
 * @sk: socket with data to read
 * @bytes: how much data to read
 *
 */
static void xs_tcp_data_ready(struct sock *sk, int bytes)
{
	struct rpc_xprt *xprt;
	read_descriptor_t rd_desc;

	read_lock(&sk->sk_callback_lock);
	dprintk("RPC:      xs_tcp_data_ready...\n");
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	if (xprt->shutdown)
		goto out;

	/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
	rd_desc.arg.data = xprt;
	rd_desc.count = 65536;
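	/* a generous quota: xs_tcp_data_recv() never decrements it, so
	 * tcp_read_sock() should keep calling back until the receive
	 * queue is drained */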
	tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
out:
	read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_tcp_state_change - callback to handle TCP socket state changes
 * @sk: socket whose state has changed
 *
 */
static void xs_tcp_state_change(struct sock *sk)
{
	struct rpc_xprt *xprt;

	read_lock(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	dprintk("RPC:      xs_tcp_state_change client %p...\n", xprt);
	dprintk("RPC:      state %x conn %d dead %d zapped %d\n",
				sk->sk_state, xprt_connected(xprt),
				sock_flag(sk, SOCK_DEAD),
				sock_flag(sk, SOCK_ZAPPED));

	switch (sk->sk_state) {
	case TCP_ESTABLISHED:
		spin_lock_bh(&xprt->sock_lock);
		if (!xprt_test_and_set_connected(xprt)) {
			/* Reset TCP record info */
			xprt->tcp_offset = 0;
			xprt->tcp_reclen = 0;
			xprt->tcp_copied = 0;
			xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID;
			rpc_wake_up(&xprt->pending);
		}
		spin_unlock_bh(&xprt->sock_lock);
		break;
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		break;
	default:
		xprt_disconnect(xprt);
		break;
	}
 out:
	read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_write_space - callback invoked when socket buffer space becomes
 *                         available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing sock_sendmsg
 * with a bunch of small requests.
 */
static void xs_write_space(struct sock *sk)
{
	struct rpc_xprt *xprt;
	struct socket *sock;

	read_lock(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)) || !(sock = sk->sk_socket))
		goto out;
	if (xprt->shutdown)
		goto out;

	/* Wait until we have enough socket memory */
	if (xprt->stream) {
		/* from net/core/stream.c:sk_stream_write_space */
		if (sk_stream_wspace(sk) < sk_stream_min_wspace(sk))
			goto out;
	} else {
		/* from net/core/sock.c:sock_def_write_space */
		if (!sock_writeable(sk))
			goto out;
	}

	if (!test_and_clear_bit(SOCK_NOSPACE, &sock->flags))
		goto out;

	spin_lock_bh(&xprt->sock_lock);
	if (xprt->snd_task)
		rpc_wake_up_task(xprt->snd_task);
	spin_unlock_bh(&xprt->sock_lock);
out:
	read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_set_buffer_size - set send and receive limits
 * @xprt: generic transport
 *
 * Set socket send and receive limits based on the
 * sndsize and rcvsize fields in the generic transport
 * structure. This applies only to UDP sockets.
 */
static void xs_set_buffer_size(struct rpc_xprt *xprt)
{
	struct sock *sk = xprt->inet;

	if (xprt->stream)
		return;
	if (xprt->rcvsize) {
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs *  2;
	}
	if (xprt->sndsize) {
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2;
		sk->sk_write_space(sk);
	}
}

static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
{
	struct sockaddr_in myaddr = {
		.sin_family = AF_INET,
	};
	int err, port;

	/* Were we already bound to a given port? Try to reuse it */
	port = xprt->port;
	do {
		myaddr.sin_port = htons(port);
		err = sock->ops->bind(sock, (struct sockaddr *) &myaddr,
						sizeof(myaddr));
		if (err == 0) {
			xprt->port = port;
			dprintk("RPC:      xs_bindresvport bound to port %u\n",
					port);
			return 0;
		}
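		/* Try the next port down, wrapping at zero; give up once the
		 * scan comes back around to the port we started from */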
		if (--port == 0)
			port = XS_MAX_RESVPORT;
	} while (err == -EADDRINUSE && port != xprt->port);

	dprintk("RPC:      can't bind to reserved port (%d).\n", -err);
	return err;
}

static struct socket *xs_create(struct rpc_xprt *xprt, int proto, int resvport)
{
	struct socket *sock;
	int type, err;

	dprintk("RPC:      xs_create(%s %d)\n",
			   (proto == IPPROTO_UDP)? "udp" : "tcp", proto);

	type = (proto == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;

	if ((err = sock_create_kern(PF_INET, type, proto, &sock)) < 0) {
		dprintk("RPC:      can't create socket (%d).\n", -err);
		return NULL;
	}

	/* If the caller has the capability, bind to a reserved port */
	if (resvport && xs_bindresvport(xprt, sock) < 0)
		goto failed;

	return sock;

failed:
	sock_release(sock);
	return NULL;
}

static void xs_bind(struct rpc_xprt *xprt, struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (xprt->inet)
		return;

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = xprt;
	xprt->old_data_ready = sk->sk_data_ready;
	xprt->old_state_change = sk->sk_state_change;
	xprt->old_write_space = sk->sk_write_space;
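	/* the saved callbacks are restored by xs_close() */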
	if (xprt->prot == IPPROTO_UDP) {
		sk->sk_data_ready = xs_udp_data_ready;
		sk->sk_no_check = UDP_CSUM_NORCV;
		xprt_set_connected(xprt);
	} else {
		tcp_sk(sk)->nonagle = 1;	/* disable Nagle's algorithm */
		sk->sk_data_ready = xs_tcp_data_ready;
		sk->sk_state_change = xs_tcp_state_change;
		xprt_clear_connected(xprt);
	}
	sk->sk_write_space = xs_write_space;

	/* Reset to new socket */
	xprt->sock = sock;
	xprt->inet = sk;
	write_unlock_bh(&sk->sk_callback_lock);

	return;
}

/**
 * xs_connect_worker - try to connect a socket to a remote endpoint
 * @args: RPC transport to connect
 *
 * Invoked from a work queue.
 */
static void xs_connect_worker(void *args)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)args;
	struct socket *sock = xprt->sock;
	int status = -EIO;

	if (xprt->shutdown || xprt->addr.sin_port == 0)
		goto out;

	dprintk("RPC:      xs_connect_worker xprt %p\n", xprt);

	/*
	 * Start by resetting any existing state
	 */
	xs_close(xprt);
	sock = xs_create(xprt, xprt->prot, xprt->resvport);
	if (sock == NULL) {
		/* couldn't create socket or bind to reserved port;
		 * this is likely a permanent error, so cause an abort */
		goto out;
	}
	xs_bind(xprt, sock);
	xs_set_buffer_size(xprt);

	status = 0;
	if (!xprt->stream)
		goto out;

	/*
	 * Tell the socket layer to start connecting...
	 */
	status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr,
			sizeof(xprt->addr), O_NONBLOCK);
	dprintk("RPC: %p  connect status %d connected %d sock state %d\n",
			xprt, -status, xprt_connected(xprt), sock->sk->sk_state);
	if (status < 0) {
		switch (status) {
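			/* A connect still in progress is not a failure; the
			 * xs_tcp_state_change() callback wakes waiting tasks
			 * when the handshake completes */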
			case -EINPROGRESS:
			case -EALREADY:
				goto out_clear;
		}
	}
out:
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
out_clear:
	smp_mb__before_clear_bit();
	clear_bit(XPRT_CONNECTING, &xprt->sockstate);
	smp_mb__after_clear_bit();
}

/**
 * xs_connect - connect a socket to a remote endpoint
 * @task: address of RPC task that manages state of connect request
 *
 * TCP: If the remote end dropped the connection, delay reconnecting.
 */
static void xs_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	if (!test_and_set_bit(XPRT_CONNECTING, &xprt->sockstate)) {
		if (xprt->sock != NULL) {
			dprintk("RPC:      xs_connect delayed xprt %p\n", xprt);
			schedule_delayed_work(&xprt->sock_connect,
					RPC_REESTABLISH_TIMEOUT);
		} else {
			dprintk("RPC:      xs_connect scheduled xprt %p\n", xprt);
			schedule_work(&xprt->sock_connect);
			/* flush_scheduled_work can sleep... */
			if (!RPC_IS_ASYNC(task))
				flush_scheduled_work();
		}
	}
}

static struct rpc_xprt_ops xs_ops = {
	.set_buffer_size	= xs_set_buffer_size,
	.connect		= xs_connect,
	.send_request		= xs_send_request,
	.close			= xs_close,
	.destroy		= xs_destroy,
};

extern unsigned int xprt_udp_slot_table_entries;
extern unsigned int xprt_tcp_slot_table_entries;

/**
 * xs_setup_udp - Set up transport to use a UDP socket
 * @xprt: transport to set up
 * @to:   timeout parameters
 *
 */
int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
{
	size_t slot_table_size;

	dprintk("RPC:      setting up udp-ipv4 transport...\n");

	xprt->max_reqs = xprt_udp_slot_table_entries;
	slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
	xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
	if (xprt->slot == NULL)
		return -ENOMEM;
	memset(xprt->slot, 0, slot_table_size);

	xprt->prot = IPPROTO_UDP;
	xprt->port = XS_MAX_RESVPORT;
	xprt->stream = 0;
	xprt->nocong = 0;
	xprt->cwnd = RPC_INITCWND;
	xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
	/* XXX: header size can vary due to auth type, IPv6, etc. */
	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);

	INIT_WORK(&xprt->sock_connect, xs_connect_worker, xprt);

	xprt->ops = &xs_ops;

	if (to)
		xprt->timeout = *to;
	else
		xprt_set_timeout(&xprt->timeout, 5, 5 * HZ);

	return 0;
}

/**
 * xs_setup_tcp - Set up transport to use a TCP socket
 * @xprt: transport to set up
 * @to: timeout parameters
 *
 */
int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
{
	size_t slot_table_size;

	dprintk("RPC:      setting up tcp-ipv4 transport...\n");

	xprt->max_reqs = xprt_tcp_slot_table_entries;
	slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
	xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
	if (xprt->slot == NULL)
		return -ENOMEM;
	memset(xprt->slot, 0, slot_table_size);

	xprt->prot = IPPROTO_TCP;
	xprt->port = XS_MAX_RESVPORT;
	xprt->stream = 1;
	xprt->nocong = 1;
	xprt->cwnd = RPC_MAXCWND(xprt);
	xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
	xprt->max_payload = (1U << 31) - 1;

	INIT_WORK(&xprt->sock_connect, xs_connect_worker, xprt);

	xprt->ops = &xs_ops;

	if (to)
		xprt->timeout = *to;
	else
		xprt_set_timeout(&xprt->timeout, 2, 60 * HZ);

	return 0;
}