// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Manage RMBE
 * copy new RMBE data into user space
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/sched/signal.h>

#include <net/sock.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_cdc.h"
#include "smc_tx.h" /* smc_tx_consumer_update() */
#include "smc_rx.h"

/* callback implementation to wake up consumers blocked with smc_rx_wait().
 * indirectly called by smc_cdc_msg_recv_action().
 */
static void smc_rx_wake_up(struct sock *sk)
{
	struct socket_wq *wq;

	/* derived from sock_def_readable() */
	/* called already in smc_listen_work() */
	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
						EPOLLRDNORM | EPOLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
	    (sk->sk_state == SMC_CLOSED))
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	rcu_read_unlock();
}

/* Update consumer cursor
 *   @conn   connection to update
 *   @cons   consumer cursor
 *   @len    number of Bytes consumed
 */
static void smc_rx_update_consumer(struct smc_connection *conn,
				   union smc_host_cursor cons, size_t len)
{
	smc_curs_add(conn->rmb_desc->len, &cons, len);
	smc_curs_write(&conn->local_tx_ctrl.cons, smc_curs_read(&cons, conn),
		       conn);
	/* send consumer cursor update if required */
	/* similar to advertising new TCP rcv_wnd if required */
	smc_tx_consumer_update(conn);
}
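
/* Illustrative sketch, not code from this file: smc_curs_add() above is
 * assumed to advance the consumer cursor modulo the RMB element size,
 * roughly
 *
 *	cons.count += len;
 *	if (cons.count >= conn->rmb_desc->len) {
 *		cons.count -= conn->rmb_desc->len;
 *		cons.wrap++;
 *	}
 *
 * i.e. the cursor wraps within the receive ring buffer before the update is
 * advertised to the peer via smc_tx_consumer_update().
 */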

struct smc_spd_priv {
	struct smc_sock *smc;
	size_t		 len;
};

static void smc_rx_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct smc_spd_priv *priv = (struct smc_spd_priv *)buf->private;
	struct smc_sock *smc = priv->smc;
	struct smc_connection *conn;
	union smc_host_cursor cons;
	struct sock *sk = &smc->sk;

	if (sk->sk_state == SMC_CLOSED ||
	    sk->sk_state == SMC_PEERFINCLOSEWAIT ||
	    sk->sk_state == SMC_APPFINCLOSEWAIT)
		goto out;
	conn = &smc->conn;
	lock_sock(sk);
	smc_curs_write(&cons, smc_curs_read(&conn->local_tx_ctrl.cons, conn),
		       conn);
	smc_rx_update_consumer(conn, cons, priv->len);
	release_sock(sk);
	if (atomic_sub_and_test(priv->len, &conn->splice_pending))
		smc_rx_wake_up(sk);
out:
	kfree(priv);
	put_page(buf->page);
	sock_put(sk);
}

static int smc_rx_pipe_buf_nosteal(struct pipe_inode_info *pipe,
				   struct pipe_buffer *buf)
{
	return 1;
}

static const struct pipe_buf_operations smc_pipe_ops = {
	.can_merge = 0,
	.confirm = generic_pipe_buf_confirm,
	.release = smc_rx_pipe_buf_release,
	.steal = smc_rx_pipe_buf_nosteal,
	.get = generic_pipe_buf_get
};

static void smc_rx_spd_release(struct splice_pipe_desc *spd,
			       unsigned int i)
{
	put_page(spd->pages[i]);
}

static int smc_rx_splice(struct pipe_inode_info *pipe, char *src, size_t len,
			 struct smc_sock *smc)
{
	struct splice_pipe_desc spd;
	struct partial_page partial;
	struct smc_spd_priv *priv;
	struct page *page;
	int bytes;

	page = virt_to_page(smc->conn.rmb_desc->cpu_addr);
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	priv->len = len;
	priv->smc = smc;
	partial.offset = src - (char *)smc->conn.rmb_desc->cpu_addr;
	partial.len = len;
	partial.private = (unsigned long)priv;

	spd.nr_pages_max = 1;
	spd.nr_pages = 1;
	spd.pages = &page;
	spd.partial = &partial;
	spd.ops = &smc_pipe_ops;
	spd.spd_release = smc_rx_spd_release;

	bytes = splice_to_pipe(pipe, &spd);
	if (bytes > 0) {
		sock_hold(&smc->sk);
		get_page(smc->conn.rmb_desc->pages);
		atomic_add(bytes, &smc->conn.splice_pending);
	}

	return bytes;
}

static int smc_rx_data_available_and_no_splice_pend(struct smc_connection *conn)
{
	return atomic_read(&conn->bytes_to_rcv) &&
	       !atomic_read(&conn->splice_pending);
}

/* blocks rcvbuf consumer until >=len bytes available or timeout or interrupted
 *   @smc    smc socket
 *   @timeo  pointer to max seconds to wait, pointer to value 0 for no timeout
 *   @fcrit  add'l criterion to evaluate as function pointer
 * Returns:
 * 1 if at least 1 byte available in rcvbuf or if socket error/shutdown.
 * 0 otherwise (nothing in rcvbuf nor timeout, e.g. interrupted).
 */
int smc_rx_wait(struct smc_sock *smc, long *timeo,
		int (*fcrit)(struct smc_connection *conn))
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	int rc;

	if (fcrit(conn))
		return 1;
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	add_wait_queue(sk_sleep(sk), &wait);
	rc = sk_wait_event(sk, timeo,
			   sk->sk_err ||
			   sk->sk_shutdown & RCV_SHUTDOWN ||
			   fcrit(conn) ||
			   smc_cdc_rxed_any_close_or_senddone(conn),
			   &wait);
	remove_wait_queue(sk_sleep(sk), &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	return rc;
}
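
/* Illustrative usage sketch, an assumption rather than code from this file:
 * callers pass one of the availability helpers as @fcrit, e.g.
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *	int rc = smc_rx_wait(smc, &timeo, smc_rx_data_available);
 *
 * where rc == 1 means at least one byte is readable (or the socket hit an
 * error/shutdown) and rc == 0 means the wait timed out or was interrupted.
 */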

/* smc_rx_recvmsg - receive data from RMBE
 * @msg:	copy data to receive buffer
 * @pipe:	copy data to pipe if set - indicates splice() call
 *
 * rcvbuf consumer: main API called by socket layer.
 * Called under sk lock.
 */
int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
		   struct pipe_inode_info *pipe, size_t len, int flags)
{
	size_t copylen, read_done = 0, read_remaining = len;
	size_t chunk_len, chunk_off, chunk_len_sum;
	struct smc_connection *conn = &smc->conn;
	int (*func)(struct smc_connection *conn);
	union smc_host_cursor cons;
	int readable, chunk;
	char *rcvbuf_base;
	struct sock *sk;
	int splbytes;
	long timeo;
	int target;		/* Read at least these many bytes */
	int rc;

	if (unlikely(flags & MSG_ERRQUEUE))
		return -EINVAL; /* future work for sk.sk_family == AF_SMC */
	if (flags & MSG_OOB)
		return -EINVAL; /* future work */

	sk = &smc->sk;
	if (sk->sk_state == SMC_LISTEN)
		return -ENOTCONN;
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	/* we currently use 1 RMBE per RMB, so RMBE == RMB base addr */
	rcvbuf_base = conn->rmb_desc->cpu_addr;

	do { /* while (read_remaining) */
		if (read_done >= target || (pipe && read_done))
			break;

		if (atomic_read(&conn->bytes_to_rcv))
			goto copy;

		if (sk->sk_shutdown & RCV_SHUTDOWN ||
		    smc_cdc_rxed_any_close_or_senddone(conn) ||
		    conn->local_tx_ctrl.conn_state_flags.peer_conn_abort)
			break;

		if (read_done) {
			if (sk->sk_err ||
			    sk->sk_state == SMC_CLOSED ||
			    !timeo ||
			    signal_pending(current))
				break;
		} else {
			if (sk->sk_err) {
				read_done = sock_error(sk);
				break;
			}
			if (sk->sk_state == SMC_CLOSED) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when user tries to read
					 * from never connected socket.
					 */
					read_done = -ENOTCONN;
					break;
				}
				break;
			}
			if (signal_pending(current)) {
				read_done = sock_intr_errno(timeo);
				break;
			}
			if (!timeo)
				return -EAGAIN;
		}

		if (!smc_rx_data_available(conn)) {
			smc_rx_wait(smc, &timeo, smc_rx_data_available);
			continue;
		}

copy:
		/* initialize variables for 1st iteration of subsequent loop */
		/* could be just 1 byte, even after waiting on data above */
		readable = atomic_read(&conn->bytes_to_rcv);
		splbytes = atomic_read(&conn->splice_pending);
		if (!readable || (msg && splbytes)) {
			if (splbytes)
				func = smc_rx_data_available_and_no_splice_pend;
			else
				func = smc_rx_data_available;
			smc_rx_wait(smc, &timeo, func);
			continue;
		}

		/* not more than what user space asked for */
		copylen = min_t(size_t, read_remaining, readable);
		smc_curs_write(&cons,
			       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
			       conn);
		/* subsequent splice() calls pick up where previous left */
		if (splbytes)
			smc_curs_add(conn->rmb_desc->len, &cons, splbytes);
		/* determine chunks where to read from rcvbuf */
		/* either unwrapped case, or 1st chunk of wrapped case */
		chunk_len = min_t(size_t, copylen, conn->rmb_desc->len -
				  cons.count);
		chunk_len_sum = chunk_len;
		chunk_off = cons.count;
		smc_rmb_sync_sg_for_cpu(conn);
		for (chunk = 0; chunk < 2; chunk++) {
			if (!(flags & MSG_TRUNC)) {
				if (msg) {
					rc = memcpy_to_msg(msg, rcvbuf_base +
							   chunk_off,
							   chunk_len);
				} else {
					rc = smc_rx_splice(pipe, rcvbuf_base +
							chunk_off, chunk_len,
							smc);
				}
				if (rc < 0) {
					if (!read_done)
						read_done = -EFAULT;
					smc_rmb_sync_sg_for_device(conn);
					goto out;
				}
			}
			read_remaining -= chunk_len;
			read_done += chunk_len;

			if (chunk_len_sum == copylen)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			chunk_len = copylen - chunk_len; /* remainder */
			chunk_len_sum += chunk_len;
			chunk_off = 0; /* modulo offset in recv ring buffer */
		}
		smc_rmb_sync_sg_for_device(conn);

		/* update cursors */
		if (!(flags & MSG_PEEK)) {
			/* increased in recv tasklet smc_cdc_msg_rcv() */
			smp_mb__before_atomic();
			atomic_sub(copylen, &conn->bytes_to_rcv);
			/* guarantee 0 <= bytes_to_rcv <= rmb_desc->len */
			smp_mb__after_atomic();
			if (msg)
				smc_rx_update_consumer(conn, cons, copylen);
		}
	} while (read_remaining);
out:
	return read_done;
}
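
/* Illustrative sketch, an assumption about the caller rather than code from
 * this file: the af_smc.c socket layer is expected to invoke this roughly as
 *
 *	lock_sock(sk);
 *	rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
 *	release_sock(sk);
 *
 * for recvmsg(), while the splice() path passes a pipe and a NULL msghdr.
 */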

/* Initialize receive properties on connection establishment. NB: not __init! */
void smc_rx_init(struct smc_sock *smc)
{
	smc->sk.sk_data_ready = smc_rx_wake_up;
	atomic_set(&smc->conn.splice_pending, 0);
}