/*
 * linux/net/sunrpc/xprtsock.c
 *
 * Client-side transport implementation for sockets.
 *
 * TCP callback races fixes (C) 1998 Red Hat Software <alan@redhat.com>
 * TCP send fixes (C) 1998 Red Hat Software <alan@redhat.com>
 * TCP NFS related read + write fixes
 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 *
 * Rewrite of large parts of the code in order to stabilize TCP stuff.
 * Fix behaviour when socket buffer is full.
 *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
#include <linux/file.h>

#include <net/sock.h>
#include <net/checksum.h>
#include <net/udp.h>
#include <net/tcp.h>

/*
 * Maximum port number to use when requesting a reserved port.
 */
#define XS_MAX_RESVPORT		(800U)

#ifdef RPC_DEBUG
# undef  RPC_DEBUG_DATA
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#ifdef RPC_DEBUG_DATA
static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	u8 *buf = (u8 *) packet;
	int j;

	dprintk("RPC:      %s\n", msg);
	for (j = 0; j < count && j < 128; j += 4) {
		if (!(j & 31)) {
			if (j)
				dprintk("\n");
			dprintk("0x%04x ", j);
		}
		dprintk("%02x%02x%02x%02x ",
			buf[j], buf[j+1], buf[j+2], buf[j+3]);
	}
	dprintk("\n");
}
#else
static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	/* NOP */
}
#endif

/**
 * xs_sendpages - write pages directly to a socket
 * @sock: socket to send on
 * @addr: UDP only -- address of destination
 * @addrlen: UDP only -- length of destination address
 * @xdr: buffer containing this request
 * @base: starting position in the buffer
 * @msgflags: message flags to apply to each socket send
 *
 */
static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, int msgflags)
{
	struct page **ppage = xdr->pages;
	unsigned int len, pglen = xdr->page_len;
	int err, ret = 0;
	ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);

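	/* An xdr_buf carries its data in three pieces: a head kvec, an
	 * array of pages, and a tail kvec.  @base counts bytes that were
	 * already sent on a previous pass, so skip whatever it covers and
	 * resume sending from there. */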
	len = xdr->head[0].iov_len;
	if (base < len || (addr != NULL && base == 0)) {
		struct kvec iov = {
			.iov_base = xdr->head[0].iov_base + base,
			.iov_len  = len - base,
		};
		struct msghdr msg = {
			.msg_name    = addr,
			.msg_namelen = addrlen,
			.msg_flags   = msgflags,
		};
		if (xdr->len > len)
			msg.msg_flags |= MSG_MORE;

		if (iov.iov_len != 0)
			err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
		else
			err = kernel_sendmsg(sock, &msg, NULL, 0, 0);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
		if (err != iov.iov_len)
			goto out;
		base = 0;
	} else
		base -= len;

	if (pglen == 0)
		goto copy_tail;
	if (base >= pglen) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}

	sendpage = sock->ops->sendpage ? : sock_no_sendpage;
	do {
		int flags = msgflags;

		len = PAGE_CACHE_SIZE;
		if (base)
			len -= base;
		if (pglen < len)
			len = pglen;

		if (pglen != len || xdr->tail[0].iov_len != 0)
			flags |= MSG_MORE;

		/* Hmm... We might be dealing with highmem pages */
		if (PageHighMem(*ppage))
			sendpage = sock_no_sendpage;
		err = sendpage(sock, *ppage, base, len, flags);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
		if (err != len)
			goto out;
		base = 0;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len) {
		struct kvec iov = {
			.iov_base = xdr->tail[0].iov_base + base,
			.iov_len  = len - base,
		};
		struct msghdr msg = {
			.msg_flags   = msgflags,
		};
		err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
	}
out:
	return ret;
}

/**
 * xs_sendmsg - write an RPC request to a socket
 * @xprt: generic transport
 * @req: the RPC request to write
 *
 */
static int xs_sendmsg(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	struct socket *sock = xprt->sock;
	struct xdr_buf *xdr = &req->rq_snd_buf;
	struct sockaddr *addr = NULL;
	int addrlen = 0;
	unsigned int skip;
	int result;

	if (!sock)
		return -ENOTCONN;

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);

	/* For UDP, we need to provide an address */
	if (!xprt->stream) {
		addr = (struct sockaddr *) &xprt->addr;
		addrlen = sizeof(xprt->addr);
	}
	/* Don't repeat bytes */
	skip = req->rq_bytes_sent;

	clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
	result = xs_sendpages(sock, addr, addrlen, xdr, skip, MSG_DONTWAIT);

	dprintk("RPC:      xs_sendmsg(%d) = %d\n", xdr->len - skip, result);

	if (result >= 0)
		return result;

	switch (result) {
	case -ECONNREFUSED:
		/* When the server has died, an ICMP port unreachable message
		 * prompts ECONNREFUSED. */
	case -EAGAIN:
		break;
	case -ECONNRESET:
	case -ENOTCONN:
	case -EPIPE:
		/* connection broken */
		if (xprt->stream)
			result = -ENOTCONN;
		break;
	default:
		break;
	}
	return result;
}

/**
 * xs_send_request - write an RPC request to a socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *      0:  The request has been sent
 * EAGAIN:  The socket was blocked, please call again later to
 *          complete the request
 *  other:  Some other error occurred, the request was not sent
 *
 * XXX: In the case of soft timeouts, should we eventually give up
 *      if the socket is not able to make progress?
 */
static int xs_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status, retry = 0;

	/* set up everything as needed. */
	/* Write the record marker */
	if (xprt->stream) {
		u32 *marker = req->rq_svec[0].iov_base;

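		/* RPC over TCP uses record marking: the top bit of the
		 * 32-bit marker flags the final fragment, the low 31 bits
		 * carry the fragment length.  The whole request is sent
		 * here as a single, final fragment. */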
		*marker = htonl(0x80000000|(req->rq_slen-sizeof(*marker)));
	}

	/* Continue transmitting the packet/record. We must be careful
	 * to cope with writespace callbacks arriving _after_ we have
	 * called sendmsg().
	 */
	while (1) {
		req->rq_xtime = jiffies;
		status = xs_sendmsg(xprt, req);

		if (status < 0)
			break;

		if (xprt->stream) {
			req->rq_bytes_sent += status;

			/* If we've sent the entire packet, immediately
			 * reset the count of bytes sent. */
			if (req->rq_bytes_sent >= req->rq_slen) {
				req->rq_bytes_sent = 0;
				return 0;
			}
		} else {
			if (status >= req->rq_slen)
				return 0;
			status = -EAGAIN;
			break;
		}

		dprintk("RPC: %4d xmit incomplete (%d left of %d)\n",
				task->tk_pid, req->rq_slen - req->rq_bytes_sent,
				req->rq_slen);

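		/* Partial sends keep us in this loop; bail out after a
		 * bounded number of attempts rather than spinning forever. */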
		status = -EAGAIN;
		if (retry++ > 50)
			break;
	}

	if (status == -EAGAIN) {
		if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) {
			/* Protect against races with xs_write_space */
			spin_lock_bh(&xprt->sock_lock);
			/* Don't race with disconnect */
			if (!xprt_connected(xprt))
				task->tk_status = -ENOTCONN;
			else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags)) {
				task->tk_timeout = req->rq_timeout;
				rpc_sleep_on(&xprt->pending, task, NULL, NULL);
			}
			spin_unlock_bh(&xprt->sock_lock);
			return status;
		}
		/* Keep holding the socket if it is blocked */
		rpc_delay(task, HZ>>4);
	}
	return status;
}

/**
 * xs_close - close a socket
 * @xprt: transport
 *
 */
static void xs_close(struct rpc_xprt *xprt)
{
	struct socket *sock = xprt->sock;
	struct sock *sk = xprt->inet;

	if (!sk)
		return;

	dprintk("RPC:      xs_close xprt %p\n", xprt);

	write_lock_bh(&sk->sk_callback_lock);
	xprt->inet = NULL;
	xprt->sock = NULL;

	sk->sk_user_data = NULL;
	sk->sk_data_ready = xprt->old_data_ready;
	sk->sk_state_change = xprt->old_state_change;
	sk->sk_write_space = xprt->old_write_space;
	write_unlock_bh(&sk->sk_callback_lock);

	sk->sk_no_check = 0;

	sock_release(sock);
}

/**
 * xs_destroy - prepare to shut down a transport
 * @xprt: doomed transport
 *
 */
static void xs_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC:      xs_destroy xprt %p\n", xprt);

	cancel_delayed_work(&xprt->sock_connect);
	flush_scheduled_work();

	xprt_disconnect(xprt);
	xs_close(xprt);
	kfree(xprt->slot);
}

static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
{
	return (struct rpc_xprt *) sk->sk_user_data;
}

/**
 * xs_udp_data_ready - "data ready" callback for UDP sockets
 * @sk: socket with data to read
 * @len: how much data to read
 *
 */
static void xs_udp_data_ready(struct sock *sk, int len)
{
	struct rpc_task *task;
	struct rpc_xprt *xprt;
	struct rpc_rqst *rovr;
	struct sk_buff *skb;
	int err, repsize, copied;
	u32 _xid, *xp;

	read_lock(&sk->sk_callback_lock);
	dprintk("RPC:      xs_udp_data_ready...\n");
	if (!(xprt = xprt_from_sock(sk)))
		goto out;

	if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
		goto out;

	if (xprt->shutdown)
		goto dropit;

	repsize = skb->len - sizeof(struct udphdr);
	if (repsize < 4) {
		dprintk("RPC:      impossible RPC reply size %d!\n", repsize);
		goto dropit;
	}

	/* Copy the XID from the skb... */
	xp = skb_header_pointer(skb, sizeof(struct udphdr),
				sizeof(_xid), &_xid);
	if (xp == NULL)
		goto dropit;

	/* Look up and lock the request corresponding to the given XID */
	spin_lock(&xprt->sock_lock);
	rovr = xprt_lookup_rqst(xprt, *xp);
	if (!rovr)
		goto out_unlock;
	task = rovr->rq_task;

	dprintk("RPC: %4d received reply\n", task->tk_pid);

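	/* Never copy more than the reply buffer can hold; excess data in
	 * the datagram is simply dropped. */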
	if ((copied = rovr->rq_private_buf.buflen) > repsize)
		copied = repsize;

	/* Suck it into the iovec, verify checksum if not done by hw. */
	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb))
		goto out_unlock;

	/* Something worked... */
	dst_confirm(skb->dst);

	xprt_complete_rqst(xprt, rovr, copied);

 out_unlock:
	spin_unlock(&xprt->sock_lock);
 dropit:
	skb_free_datagram(sk, skb);
 out:
	read_unlock(&sk->sk_callback_lock);
}

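/* Copy up to @len bytes from the reader's current position in the skb
 * into @p, advancing the reader.  Returns the number of bytes copied. */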
static inline size_t xs_tcp_copy_data(skb_reader_t *desc, void *p, size_t len)
{
	if (len > desc->count)
		len = desc->count;
	if (skb_copy_bits(desc->skb, desc->offset, p, len)) {
		dprintk("RPC:      failed to copy %zu bytes from skb. %zu bytes remain\n",
				len, desc->count);
		return 0;
	}
	desc->offset += len;
	desc->count -= len;
	dprintk("RPC:      copied %zu bytes from skb. %zu bytes remain\n",
			len, desc->count);
	return len;
}

static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	size_t len, used;
	char *p;

	p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset;
	len = sizeof(xprt->tcp_recm) - xprt->tcp_offset;
	used = xs_tcp_copy_data(desc, p, len);
	xprt->tcp_offset += used;
	if (used != len)
		return;
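	/* The complete record marker has arrived: bit 31 marks the last
	 * fragment of the record, bits 0-30 give the fragment length. */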
	xprt->tcp_reclen = ntohl(xprt->tcp_recm);
	if (xprt->tcp_reclen & 0x80000000)
		xprt->tcp_flags |= XPRT_LAST_FRAG;
	else
		xprt->tcp_flags &= ~XPRT_LAST_FRAG;
	xprt->tcp_reclen &= 0x7fffffff;
	xprt->tcp_flags &= ~XPRT_COPY_RECM;
	xprt->tcp_offset = 0;
	/* Sanity check of the record length */
	if (xprt->tcp_reclen < 4) {
		dprintk("RPC:      invalid TCP record fragment length\n");
		xprt_disconnect(xprt);
		return;
	}
	dprintk("RPC:      reading TCP record fragment of length %d\n",
			xprt->tcp_reclen);
}

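/* Once an entire record fragment has been consumed, rearm the receive
 * state machine: expect a new record marker next and, if this was the
 * record's final fragment, a fresh XID as well. */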
static void xs_tcp_check_recm(struct rpc_xprt *xprt)
{
	dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n",
			xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags);
	if (xprt->tcp_offset == xprt->tcp_reclen) {
		xprt->tcp_flags |= XPRT_COPY_RECM;
		xprt->tcp_offset = 0;
		if (xprt->tcp_flags & XPRT_LAST_FRAG) {
			xprt->tcp_flags &= ~XPRT_COPY_DATA;
			xprt->tcp_flags |= XPRT_COPY_XID;
			xprt->tcp_copied = 0;
		}
	}
}

static inline void xs_tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	size_t len, used;
	char *p;

	len = sizeof(xprt->tcp_xid) - xprt->tcp_offset;
	dprintk("RPC:      reading XID (%Zu bytes)\n", len);
	p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset;
	used = xs_tcp_copy_data(desc, p, len);
	xprt->tcp_offset += used;
	if (used != len)
		return;
	xprt->tcp_flags &= ~XPRT_COPY_XID;
	xprt->tcp_flags |= XPRT_COPY_DATA;
	xprt->tcp_copied = 4;
	dprintk("RPC:      reading reply for XID %08x\n",
						ntohl(xprt->tcp_xid));
	xs_tcp_check_recm(xprt);
}

static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	struct rpc_rqst *req;
	struct xdr_buf *rcvbuf;
	size_t len;
	ssize_t r;

	/* Find and lock the request corresponding to this xid */
	spin_lock(&xprt->sock_lock);
	req = xprt_lookup_rqst(xprt, xprt->tcp_xid);
	if (!req) {
		xprt->tcp_flags &= ~XPRT_COPY_DATA;
		dprintk("RPC:      XID %08x request not found!\n",
				ntohl(xprt->tcp_xid));
		spin_unlock(&xprt->sock_lock);
		return;
	}

	rcvbuf = &req->rq_private_buf;
	len = desc->count;
	if (len > xprt->tcp_reclen - xprt->tcp_offset) {
		skb_reader_t my_desc;

		len = xprt->tcp_reclen - xprt->tcp_offset;
		memcpy(&my_desc, desc, sizeof(my_desc));
		my_desc.count = len;
		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
					  &my_desc, xs_tcp_copy_data);
		desc->count -= r;
		desc->offset += r;
	} else
		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
					  desc, xs_tcp_copy_data);

	if (r > 0) {
		xprt->tcp_copied += r;
		xprt->tcp_offset += r;
	}
	if (r != len) {
		/* Error when copying to the receive buffer,
		 * usually because we weren't able to allocate
		 * additional buffer pages. All we can do now
		 * is turn off XPRT_COPY_DATA, so the request
		 * will not receive any additional updates,
		 * and time out.
		 * Any remaining data from this record will
		 * be discarded.
		 */
		xprt->tcp_flags &= ~XPRT_COPY_DATA;
		dprintk("RPC:      XID %08x truncated request\n",
				ntohl(xprt->tcp_xid));
		dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
				xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
		goto out;
	}

	dprintk("RPC:      XID %08x read %Zd bytes\n",
			ntohl(xprt->tcp_xid), r);
	dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
			xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);

	if (xprt->tcp_copied == req->rq_private_buf.buflen)
		xprt->tcp_flags &= ~XPRT_COPY_DATA;
	else if (xprt->tcp_offset == xprt->tcp_reclen) {
		if (xprt->tcp_flags & XPRT_LAST_FRAG)
			xprt->tcp_flags &= ~XPRT_COPY_DATA;
	}

out:
	if (!(xprt->tcp_flags & XPRT_COPY_DATA)) {
		dprintk("RPC: %4d received reply complete\n",
				req->rq_task->tk_pid);
		xprt_complete_rqst(xprt, req, xprt->tcp_copied);
	}
	spin_unlock(&xprt->sock_lock);
	xs_tcp_check_recm(xprt);
}

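/* Throw away any remaining bytes of the current record fragment that
 * nobody is waiting for, e.g. trailing data after a truncated copy or
 * an unmatched XID. */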
static inline void xs_tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	size_t len;

	len = xprt->tcp_reclen - xprt->tcp_offset;
	if (len > desc->count)
		len = desc->count;
	desc->count -= len;
	desc->offset += len;
	xprt->tcp_offset += len;
	dprintk("RPC:      discarded %Zu bytes\n", len);
	xs_tcp_check_recm(xprt);
}

static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
{
	struct rpc_xprt *xprt = rd_desc->arg.data;
	skb_reader_t desc = {
		.skb	= skb,
		.offset	= offset,
		.count	= len,
		.csum	= 0
	};

	dprintk("RPC:      xs_tcp_data_recv started\n");
	do {
		/* Read in a new fragment marker if necessary */
		/* Can we ever really expect to get completely empty fragments? */
		if (xprt->tcp_flags & XPRT_COPY_RECM) {
			xs_tcp_read_fraghdr(xprt, &desc);
			continue;
		}
		/* Read in the xid if necessary */
		if (xprt->tcp_flags & XPRT_COPY_XID) {
			xs_tcp_read_xid(xprt, &desc);
			continue;
		}
		/* Read in the request data */
		if (xprt->tcp_flags & XPRT_COPY_DATA) {
			xs_tcp_read_request(xprt, &desc);
			continue;
		}
		/* Skip over any trailing bytes on short reads */
		xs_tcp_read_discard(xprt, &desc);
	} while (desc.count);
	dprintk("RPC:      xs_tcp_data_recv done\n");
	return len - desc.count;
}

/**
 * xs_tcp_data_ready - "data ready" callback for TCP sockets
 * @sk: socket with data to read
 * @bytes: how much data to read
 *
 */
static void xs_tcp_data_ready(struct sock *sk, int bytes)
{
	struct rpc_xprt *xprt;
	read_descriptor_t rd_desc;

	read_lock(&sk->sk_callback_lock);
	dprintk("RPC:      xs_tcp_data_ready...\n");
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	if (xprt->shutdown)
		goto out;

	/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
	rd_desc.arg.data = xprt;
	rd_desc.count = 65536;
	tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
out:
	read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_tcp_state_change - callback to handle TCP socket state changes
 * @sk: socket whose state has changed
 *
 */
static void xs_tcp_state_change(struct sock *sk)
{
	struct rpc_xprt *xprt;

	read_lock(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	dprintk("RPC:      xs_tcp_state_change client %p...\n", xprt);
	dprintk("RPC:      state %x conn %d dead %d zapped %d\n",
				sk->sk_state, xprt_connected(xprt),
				sock_flag(sk, SOCK_DEAD),
				sock_flag(sk, SOCK_ZAPPED));

	switch (sk->sk_state) {
	case TCP_ESTABLISHED:
		spin_lock_bh(&xprt->sock_lock);
		if (!xprt_test_and_set_connected(xprt)) {
			/* Reset TCP record info */
			xprt->tcp_offset = 0;
			xprt->tcp_reclen = 0;
			xprt->tcp_copied = 0;
			xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID;
			rpc_wake_up(&xprt->pending);
		}
		spin_unlock_bh(&xprt->sock_lock);
		break;
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		break;
	default:
		xprt_disconnect(xprt);
		break;
	}
 out:
	read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_write_space - callback invoked when socket buffer space becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing sock_sendmsg
 * with a bunch of small requests.
 */
static void xs_write_space(struct sock *sk)
{
	struct rpc_xprt *xprt;
	struct socket *sock;

	read_lock(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)) || !(sock = sk->sk_socket))
		goto out;
	if (xprt->shutdown)
		goto out;

	/* Wait until we have enough socket memory */
	if (xprt->stream) {
		/* from net/core/stream.c:sk_stream_write_space */
		if (sk_stream_wspace(sk) < sk_stream_min_wspace(sk))
			goto out;
	} else {
		/* from net/core/sock.c:sock_def_write_space */
		if (!sock_writeable(sk))
			goto out;
	}

	if (!test_and_clear_bit(SOCK_NOSPACE, &sock->flags))
		goto out;

	spin_lock_bh(&xprt->sock_lock);
	if (xprt->snd_task)
		rpc_wake_up_task(xprt->snd_task);
	spin_unlock_bh(&xprt->sock_lock);
out:
	read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_set_buffer_size - set send and receive limits
 * @xprt: generic transport
 *
 * Set socket send and receive limits based on the
 * sndsize and rcvsize fields in the generic transport
 * structure. This applies only to UDP sockets.
 */
static void xs_set_buffer_size(struct rpc_xprt *xprt)
{
	struct sock *sk = xprt->inet;

	if (xprt->stream)
		return;
	if (xprt->rcvsize) {
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2;
	}
	if (xprt->sndsize) {
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2;
		sk->sk_write_space(sk);
	}
}

static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
{
	struct sockaddr_in myaddr = {
		.sin_family = AF_INET,
	};
	int err, port;

	/* Were we already bound to a given port? Try to reuse it */
	port = xprt->port;
	do {
		myaddr.sin_port = htons(port);
		err = sock->ops->bind(sock, (struct sockaddr *) &myaddr,
						sizeof(myaddr));
		if (err == 0) {
			xprt->port = port;
			dprintk("RPC:      xs_bindresvport bound to port %u\n",
					port);
			return 0;
		}
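		/* Walk downward through the reserved range, wrapping back
		 * to XS_MAX_RESVPORT, and give up once we have come all the
		 * way around to the port we started from. */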
		if (--port == 0)
			port = XS_MAX_RESVPORT;
	} while (err == -EADDRINUSE && port != xprt->port);

	dprintk("RPC:      can't bind to reserved port (%d).\n", -err);
	return err;
}

static struct socket *xs_create(struct rpc_xprt *xprt, int proto, int resvport)
{
	struct socket *sock;
	int type, err;

	dprintk("RPC:      xs_create(%s %d)\n",
			   (proto == IPPROTO_UDP)? "udp" : "tcp", proto);

	type = (proto == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;

	if ((err = sock_create_kern(PF_INET, type, proto, &sock)) < 0) {
		dprintk("RPC:      can't create socket (%d).\n", -err);
		return NULL;
	}

	/* If the caller has the capability, bind to a reserved port */
	if (resvport && xs_bindresvport(xprt, sock) < 0)
		goto failed;

	return sock;

failed:
	sock_release(sock);
	return NULL;
}

static void xs_bind(struct rpc_xprt *xprt, struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (xprt->inet)
		return;

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = xprt;
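	/* Save the socket's original callbacks so that xs_close() can
	 * restore them before releasing the socket. */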
	xprt->old_data_ready = sk->sk_data_ready;
	xprt->old_state_change = sk->sk_state_change;
	xprt->old_write_space = sk->sk_write_space;
	if (xprt->prot == IPPROTO_UDP) {
		sk->sk_data_ready = xs_udp_data_ready;
		sk->sk_no_check = UDP_CSUM_NORCV;
		xprt_set_connected(xprt);
	} else {
		tcp_sk(sk)->nonagle = 1;	/* disable Nagle's algorithm */
		sk->sk_data_ready = xs_tcp_data_ready;
		sk->sk_state_change = xs_tcp_state_change;
		xprt_clear_connected(xprt);
	}
	sk->sk_write_space = xs_write_space;

	/* Reset to new socket */
	xprt->sock = sock;
	xprt->inet = sk;
	write_unlock_bh(&sk->sk_callback_lock);

	return;
}

/**
 * xs_connect_worker - try to connect a socket to a remote endpoint
 * @args: RPC transport to connect
 *
 * Invoked from a work queue.
 */
static void xs_connect_worker(void *args)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)args;
	struct socket *sock = xprt->sock;
	int status = -EIO;

	if (xprt->shutdown || xprt->addr.sin_port == 0)
		goto out;

	dprintk("RPC:      xs_connect_worker xprt %p\n", xprt);

	/*
	 * Start by resetting any existing state
	 */
	xs_close(xprt);
	sock = xs_create(xprt, xprt->prot, xprt->resvport);
	if (sock == NULL) {
		/* couldn't create socket or bind to reserved port;
		 * this is likely a permanent error, so cause an abort */
		goto out;
	}
	xs_bind(xprt, sock);
	xs_set_buffer_size(xprt);

	status = 0;
	if (!xprt->stream)
		goto out;

	/*
	 * Tell the socket layer to start connecting...
	 */
	status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr,
			sizeof(xprt->addr), O_NONBLOCK);
	dprintk("RPC: %p  connect status %d connected %d sock state %d\n",
			xprt, -status, xprt_connected(xprt), sock->sk->sk_state);
	if (status < 0) {
		switch (status) {
			case -EINPROGRESS:
			case -EALREADY:
				goto out_clear;
		}
	}
out:
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
out_clear:
	smp_mb__before_clear_bit();
	clear_bit(XPRT_CONNECTING, &xprt->sockstate);
	smp_mb__after_clear_bit();
}

/**
 * xs_connect - connect a socket to a remote endpoint
 * @task: address of RPC task that manages state of connect request
 *
 * TCP: If the remote end dropped the connection, delay reconnecting.
 */
static void xs_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	if (!test_and_set_bit(XPRT_CONNECTING, &xprt->sockstate)) {
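		/* An existing socket means the peer has likely dropped the
		 * connection; back off for RPC_REESTABLISH_TIMEOUT before
		 * reconnecting instead of hammering the server. */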
		if (xprt->sock != NULL) {
			dprintk("RPC:      xs_connect delayed xprt %p\n", xprt);
			schedule_delayed_work(&xprt->sock_connect,
					RPC_REESTABLISH_TIMEOUT);
		} else {
			dprintk("RPC:      xs_connect scheduled xprt %p\n", xprt);
			schedule_work(&xprt->sock_connect);
			/* flush_scheduled_work can sleep... */
			if (!RPC_IS_ASYNC(task))
				flush_scheduled_work();
		}
	}
}

static struct rpc_xprt_ops xs_ops = {
	.set_buffer_size	= xs_set_buffer_size,
	.connect		= xs_connect,
	.send_request		= xs_send_request,
	.close			= xs_close,
	.destroy		= xs_destroy,
};

extern unsigned int xprt_udp_slot_table_entries;
extern unsigned int xprt_tcp_slot_table_entries;

/**
 * xs_setup_udp - Set up transport to use a UDP socket
 * @xprt: transport to set up
 * @to:   timeout parameters
 *
 */
int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
{
	size_t slot_table_size;

	dprintk("RPC:      setting up udp-ipv4 transport...\n");

	xprt->max_reqs = xprt_udp_slot_table_entries;
	slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
	xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
	if (xprt->slot == NULL)
		return -ENOMEM;
	memset(xprt->slot, 0, slot_table_size);

	xprt->prot = IPPROTO_UDP;
	xprt->port = XS_MAX_RESVPORT;
	xprt->stream = 0;
	xprt->nocong = 0;
	xprt->cwnd = RPC_INITCWND;
	xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
	/* XXX: header size can vary due to auth type, IPv6, etc. */
	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);

	INIT_WORK(&xprt->sock_connect, xs_connect_worker, xprt);

	xprt->ops = &xs_ops;

	if (to)
		xprt->timeout = *to;
	else
		xprt_set_timeout(&xprt->timeout, 5, 5 * HZ);

	return 0;
}

/**
 * xs_setup_tcp - Set up transport to use a TCP socket
 * @xprt: transport to set up
 * @to: timeout parameters
 *
 */
int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
{
	size_t slot_table_size;

	dprintk("RPC:      setting up tcp-ipv4 transport...\n");

	xprt->max_reqs = xprt_tcp_slot_table_entries;
	slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
	xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
	if (xprt->slot == NULL)
		return -ENOMEM;
	memset(xprt->slot, 0, slot_table_size);

	xprt->prot = IPPROTO_TCP;
	xprt->port = XS_MAX_RESVPORT;
	xprt->stream = 1;
	xprt->nocong = 1;
	xprt->cwnd = RPC_MAXCWND(xprt);
	xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
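	/* A TCP record fragment length fits in 31 bits, which bounds the
	 * largest payload this transport can carry. */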
	xprt->max_payload = (1U << 31) - 1;

	INIT_WORK(&xprt->sock_connect, xs_connect_worker, xprt);

	xprt->ops = &xs_ops;

	if (to)
		xprt->timeout = *to;
	else
		xprt_set_timeout(&xprt->timeout, 2, 60 * HZ);

	return 0;
}