/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/dmapool.h>

#include "rds.h"
#include "ib.h"

static char *rds_ib_wc_status_strings[] = {
#define RDS_IB_WC_STATUS_STR(foo) \
		[IB_WC_##foo] = __stringify(IB_WC_##foo)
	RDS_IB_WC_STATUS_STR(SUCCESS),
	RDS_IB_WC_STATUS_STR(LOC_LEN_ERR),
	RDS_IB_WC_STATUS_STR(LOC_QP_OP_ERR),
	RDS_IB_WC_STATUS_STR(LOC_EEC_OP_ERR),
	RDS_IB_WC_STATUS_STR(LOC_PROT_ERR),
	RDS_IB_WC_STATUS_STR(WR_FLUSH_ERR),
	RDS_IB_WC_STATUS_STR(MW_BIND_ERR),
	RDS_IB_WC_STATUS_STR(BAD_RESP_ERR),
	RDS_IB_WC_STATUS_STR(LOC_ACCESS_ERR),
	RDS_IB_WC_STATUS_STR(REM_INV_REQ_ERR),
	RDS_IB_WC_STATUS_STR(REM_ACCESS_ERR),
	RDS_IB_WC_STATUS_STR(REM_OP_ERR),
	RDS_IB_WC_STATUS_STR(RETRY_EXC_ERR),
	RDS_IB_WC_STATUS_STR(RNR_RETRY_EXC_ERR),
	RDS_IB_WC_STATUS_STR(LOC_RDD_VIOL_ERR),
	RDS_IB_WC_STATUS_STR(REM_INV_RD_REQ_ERR),
	RDS_IB_WC_STATUS_STR(REM_ABORT_ERR),
	RDS_IB_WC_STATUS_STR(INV_EECN_ERR),
	RDS_IB_WC_STATUS_STR(INV_EEC_STATE_ERR),
	RDS_IB_WC_STATUS_STR(FATAL_ERR),
	RDS_IB_WC_STATUS_STR(RESP_TIMEOUT_ERR),
	RDS_IB_WC_STATUS_STR(GENERAL_ERR),
#undef RDS_IB_WC_STATUS_STR
};

char *rds_ib_wc_status_str(enum ib_wc_status status)
{
	return rds_str_array(rds_ib_wc_status_strings,
			     ARRAY_SIZE(rds_ib_wc_status_strings), status);
}

/*
 * Convert IB-specific error message to RDS error message and call core
 * completion handler.
 */
static void rds_ib_send_complete(struct rds_message *rm,
				 int wc_status,
				 void (*complete)(struct rds_message *rm, int status))
{
	int notify_status;

	switch (wc_status) {
	case IB_WC_WR_FLUSH_ERR:
		return;

	case IB_WC_SUCCESS:
		notify_status = RDS_RDMA_SUCCESS;
		break;

	case IB_WC_REM_ACCESS_ERR:
		notify_status = RDS_RDMA_REMOTE_ERROR;
		break;

	default:
		notify_status = RDS_RDMA_OTHER_ERROR;
		break;
	}

	complete(rm, notify_status);
}

static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
				   struct rm_data_op *op,
				   int wc_status)
{
	if (op->op_nents)
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->op_sg, op->op_nents,
				DMA_TO_DEVICE);
}

static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
				   struct rm_rdma_op *op,
				   int wc_status)
{
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->op_sg, op->op_nents,
				op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}

	/* If the user asked for a completion notification on this
	 * message, we can implement three different semantics:
	 *  1.	Notify when we received the ACK on the RDS message
	 *	that was queued with the RDMA. This provides reliable
	 *	notification of RDMA status at the expense of a one-way
	 *	packet delay.
	 *  2.	Notify when the IB stack gives us the completion event for
	 *	the RDMA operation.
	 *  3.	Notify when the IB stack gives us the completion event for
	 *	the accompanying RDS messages.
	 * Here, we implement approach #3. To implement approach #2,
	 * we would need to take an event for the rdma WR. To implement #1,
	 * don't call rds_rdma_send_complete at all, and fall back to the notify
	 * handling in the ACK processing code.
	 *
	 * Note: There's no need to explicitly sync any RDMA buffers using
	 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
	 * operation itself unmapped the RDMA buffers, which takes care
	 * of synching.
	 */
	rds_ib_send_complete(container_of(op, struct rds_message, rdma),
			     wc_status, rds_rdma_send_complete);

	if (op->op_write)
		rds_stats_add(s_send_rdma_bytes, op->op_bytes);
	else
		rds_stats_add(s_recv_rdma_bytes, op->op_bytes);
}

static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
				     struct rm_atomic_op *op,
				     int wc_status)
{
	/* unmap atomic recvbuf */
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
				DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}

	rds_ib_send_complete(container_of(op, struct rds_message, atomic),
			     wc_status, rds_atomic_send_complete);

	if (op->op_type == RDS_ATOMIC_TYPE_CSWP)
		rds_ib_stats_inc(s_ib_atomic_cswp);
	else
		rds_ib_stats_inc(s_ib_atomic_fadd);
}

/*
 * Unmap the resources associated with a struct send_work.
 *
 * Returns the rm for no reason other than that the caller, the event
 * handler, needs it, and currently the only way to obtain it is by
 * switching on wr.opcode.
 */
static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
						struct rds_ib_send_work *send,
						int wc_status)
{
	struct rds_message *rm = NULL;

	/* In the error case, wc.opcode sometimes contains garbage */
	switch (send->s_wr.opcode) {
	case IB_WR_SEND:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, data);
			rds_ib_send_unmap_data(ic, send->s_op, wc_status);
		}
		break;
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_READ:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, rdma);
			rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
		}
		break;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
	case IB_WR_ATOMIC_CMP_AND_SWP:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, atomic);
			rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
		}
		break;
	default:
		if (printk_ratelimit())
			printk(KERN_NOTICE
			       "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
			       __func__, send->s_wr.opcode);
		break;
	}

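	/* Poison the opcode so rds_ib_send_clear_ring() can tell that this
	 * entry's resources have already been unmapped. */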
	send->s_wr.opcode = 0xdead;

	return rm;
}

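/*
 * A note on the ring layout, as set up below and used in rds_ib_xmit():
 * every send work request carries a two-entry scatter/gather list, with
 * s_sge[0] wired to this entry's slot in the DMA-mapped header array and
 * s_sge[1] filled in later with one data fragment per send:
 *
 *	s_wr.sg_list -> [0] rds_header slot in i_send_hdrs (fixed here)
 *	                [1] up to RDS_FRAG_SIZE of message data (per send)
 */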
void rds_ib_send_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		struct ib_sge *sge;

		send->s_op = NULL;

		send->s_wr.wr_id = i;
		send->s_wr.sg_list = send->s_sge;
		send->s_wr.ex.imm_data = 0;

		sge = &send->s_sge[0];
		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_mr->lkey;

		send->s_sge[1].lkey = ic->i_mr->lkey;
	}
}

void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		if (send->s_op && send->s_wr.opcode != 0xdead)
			rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
	}
}

/*
 * The only fast path caller always has a non-zero nr, so we don't
 * bother testing nr before performing the atomic sub.
 */
static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
{
	if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
	    waitqueue_active(&rds_ib_ring_empty_wait))
		wake_up(&rds_ib_ring_empty_wait);
	BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
}

/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path.  As the sender allocs and potentially
 * unallocs the next free entry in the ring, it doesn't alter which entry is
 * the next to be freed, which is what this is concerned with.
 */
void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_message *rm = NULL;
	struct ib_wc wc;
	struct rds_ib_send_work *send;
	u32 completed;
	u32 oldest;
	u32 i = 0;
	int ret;
	int nr_sig = 0;

	rdsdebug("cq %p conn %p\n", cq, conn);
	rds_ib_stats_inc(s_ib_tx_cq_call);
	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	if (ret)
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status,
			 rds_ib_wc_status_str(wc.status), wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_ib_stats_inc(s_ib_tx_cq_event);

		if (wc.wr_id == RDS_IB_ACK_WR_ID) {
			if (ic->i_ack_queued + HZ/2 < jiffies)
				rds_ib_stats_inc(s_ib_tx_stalled);
			rds_ib_ack_send_complete(ic);
			continue;
		}

		oldest = rds_ib_ring_oldest(&ic->i_send_ring);

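		/* Sends are mostly posted unsignaled, so one signaled
		 * completion also retires all older unsignaled ring entries
		 * before it; walk from the oldest outstanding entry through
		 * the one named by wc.wr_id. */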
		completed = rds_ib_ring_completed(&ic->i_send_ring, wc.wr_id, oldest);

		for (i = 0; i < completed; i++) {
			send = &ic->i_sends[oldest];
			if (send->s_wr.send_flags & IB_SEND_SIGNALED)
				nr_sig++;

			rm = rds_ib_send_unmap_op(ic, send, wc.status);

			if (send->s_queued + HZ/2 < jiffies)
				rds_ib_stats_inc(s_ib_tx_stalled);

			if (send->s_op) {
				if (send->s_op == rm->m_final_op) {
					/* If anyone waited for this message to get flushed out, wake
					 * them up now */
					rds_message_unmapped(rm);
				}
				rds_message_put(rm);
				send->s_op = NULL;
			}

			oldest = (oldest + 1) % ic->i_send_ring.w_nr;
		}

		rds_ib_ring_free(&ic->i_send_ring, completed);
		rds_ib_sub_signaled(ic, nr_sig);
		nr_sig = 0;

		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
		    test_bit(0, &conn->c_map_queued))
			queue_delayed_work(rds_wq, &conn->c_send_w, 0);

		/* We expect errors as the qp is drained during shutdown */
		if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
			rds_ib_conn_error(conn, "send completion on %pI4 had status "
					  "%u (%s), disconnecting and reconnecting\n",
					  &conn->c_faddr, wc.status,
					  rds_ib_wc_status_str(wc.status));
		}
	}
}

/*
 * This is the main function for allocating credits when sending
 * messages.
 *
 * Conceptually, we have two counters:
 *  -	send credits: this tells us how many WRs we're allowed
 *	to submit without overrunning the receiver's queue. For
 *	each SEND WR we post, we decrement this by one.
 *
 *  -	posted credits: this tells us how many WRs we recently
 *	posted to the receive queue. This value is transferred
 *	to the peer as a "credit update" in an RDS header field.
 *	Every time we transmit credits to the peer, we subtract
 *	the amount of transferred credits from this counter.
 *
 * It is essential that we avoid situations where both sides have
 * exhausted their send credits, and are unable to send new credits
 * to the peer. We achieve this by requiring that we send at least
 * one credit update to the peer before exhausting our credits.
 * When new credits arrive, we subtract one credit that is withheld
 * until we've posted new buffers and are ready to transmit these
 * credits (see rds_ib_send_add_credits below).
 *
 * The RDS send code is essentially single-threaded; rds_send_xmit
 * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
 * However, the ACK sending code is independent and can race with
 * message SENDs.
 *
 * In the send path, we need to update the counters for send credits
 * and the counter of posted buffers atomically - when we use the
 * last available credit, we cannot allow another thread to race us
 * and grab the posted credits counter.  Hence, we have to use a
 * spinlock to protect the credit counter, or use atomics.
 *
 * Spinlocks shared between the send and the receive path are bad,
 * because they create unnecessary delays. An early implementation
 * using a spinlock showed a 5% degradation in throughput at some
 * loads.
 *
 * This implementation avoids spinlocks completely, putting both
 * counters into a single atomic, and updating that atomic using
 * atomic_add (in the receive path, when receiving fresh credits),
 * and using atomic_cmpxchg when updating the two counters.
 */
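/*
 * A sketch of the packing this relies on (the IB_*_CREDITS macros live in
 * ib.h; the exact bit split is assumed here, not spelled out in this file):
 *
 *	ic->i_credits (one atomic_t)
 *	  bits  0..15  send credits    IB_GET_SEND_CREDITS(v)
 *	  bits 16..31  posted credits  IB_GET_POST_CREDITS(v)
 *
 * which is what lets rds_ib_send_grab_credits() below read and update both
 * counters with a single atomic_cmpxchg().
 */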
int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
	unsigned int avail, posted, got = 0, advertise;
	long oldval, newval;

	*adv_credits = 0;
	if (!ic->i_flowctl)
		return wanted;

try_again:
	advertise = 0;
	oldval = newval = atomic_read(&ic->i_credits);
	posted = IB_GET_POST_CREDITS(oldval);
	avail = IB_GET_SEND_CREDITS(oldval);

	rdsdebug("rds_ib_send_grab_credits(%u): credits=%u posted=%u\n",
			wanted, avail, posted);

	/* The last credit must be used to send a credit update. */
	if (avail && !posted)
		avail--;

	if (avail < wanted) {
		struct rds_connection *conn = ic->i_cm_id->context;

		/* Oops, there aren't that many credits left! */
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		got = avail;
	} else {
		/* Sometimes you get what you want, lalala. */
		got = wanted;
	}
	newval -= IB_SET_SEND_CREDITS(got);

	/*
	 * If need_posted is non-zero, then the caller wants
	 * the posted credits advertised regardless of whether any
	 * send credits are available.
	 */
	if (posted && (got || need_posted)) {
		advertise = min_t(unsigned int, posted, max_posted);
		newval -= IB_SET_POST_CREDITS(advertise);
	}

	/* Finally bill everything */
	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
		goto try_again;

	*adv_credits = advertise;
	return got;
}

void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (credits == 0)
		return;

	rdsdebug("rds_ib_send_add_credits(%u): current=%u%s\n",
			credits,
			IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
			test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

	rds_ib_stats_inc(s_ib_rx_credit_updates);
}

void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (posted == 0)
		return;

	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

	/* Decide whether to send an update to the peer now.
	 * If we would send a credit update for every single buffer we
	 * post, we would end up with an ACK storm (ACK arrives,
	 * consumes buffer, we refill the ring, send ACK to remote
	 * advertising the newly posted buffer... ad inf)
	 *
	 * Performance pretty much depends on how often we send
	 * credit updates - too frequent updates mean lots of ACKs.
	 * Too infrequent updates, and the peer will run out of
	 * credits and has to throttle.
	 * For the time being, 16 seems to be a good compromise.
	 */
	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}

static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
					     struct rds_ib_send_work *send,
					     bool notify)
{
	/*
	 * We want to delay signaling completions just enough to get
	 * the batching benefits but not so much that we create dead time
	 * on the wire.
	 */
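	/*
	 * (The countdown means roughly one WR in every
	 * rds_ib_sysctl_max_unsig_wrs + 1 is signaled, unless the caller
	 * explicitly asks for notification.)
	 */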
	if (ic->i_unsignaled_wrs-- == 0 || notify) {
		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
		send->s_wr.send_flags |= IB_SEND_SIGNALED;
		return 1;
	}
	return 0;
}

/*
 * This can be called multiple times for a given message.  The first time
 * we see a message we map its scatterlist into the IB device so that
 * we can provide that mapped address to the IB scatter gather entries
 * in the IB work requests.  We translate the scatterlist into a series
 * of work requests that fragment the message.  These work requests complete
 * in order so we pass ownership of the message to the completion handler
 * once we send the final fragment.
 *
 * The RDS core sets RDS_IN_XMIT to ensure that only one caller enters
 * this function at a time per connection.  This makes sure that the tx
 * ring alloc/unalloc pairs don't get out of sync and confuse the ring.
 */
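/*
 * For example, assuming the usual RDS_FRAG_SIZE of 4096 bytes: a 10000
 * byte message is fragmented into ceil(10000, 4096) = 3 work requests,
 * each carrying a copy of the rds_header in s_sge[0] and up to 4096
 * bytes of data in s_sge[1].
 */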
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	u32 pos;
	u32 i;
	u32 work_alloc;
	u32 credit_alloc = 0;
	u32 posted;
	u32 adv_credits = 0;
	int send_flags = 0;
	int bytes_sent = 0;
	int ret;
	int flow_controlled = 0;
	int nr_sig = 0;

	BUG_ON(off % RDS_FRAG_SIZE);
	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

	/* Do not send cong updates to IB loopback */
	if (conn->c_loopback
	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
		return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
	}

	/* FIXME we may overallocate here */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
		i = 1;
	else
		i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc == 0) {
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	if (ic->i_flowctl) {
		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
		adv_credits += posted;
		if (credit_alloc < work_alloc) {
			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
			work_alloc = credit_alloc;
			flow_controlled = 1;
		}
		if (work_alloc == 0) {
			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
			rds_ib_stats_inc(s_ib_tx_throttle);
			ret = -ENOMEM;
			goto out;
		}
	}

	/* map the message the first time we see it */
	if (!ic->i_data_op) {
		if (rm->data.op_nents) {
			rm->data.op_count = ib_dma_map_sg(dev,
							  rm->data.op_sg,
							  rm->data.op_nents,
							  DMA_TO_DEVICE);
			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
			if (rm->data.op_count == 0) {
				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
		} else {
			rm->data.op_count = 0;
		}

		rds_message_addref(rm);
		ic->i_data_op = &rm->data;

		/* Finalize the header */
		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

		/* If it has a RDMA op, tell the peer we did it. This is
		 * used by the peer to release use-once RDMA MRs. */
		if (rm->rdma.op_active) {
			struct rds_ext_header_rdma ext_hdr;

			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
			rds_message_add_extension(&rm->m_inc.i_hdr,
					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
		}
		if (rm->m_rdma_cookie) {
			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
					rds_rdma_cookie_key(rm->m_rdma_cookie),
					rds_rdma_cookie_offset(rm->m_rdma_cookie));
		}

		/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
		 * we should not do this unless we have a chance of at least
		 * sticking the header into the send ring. Which is why we
		 * should call rds_ib_ring_alloc first. */
		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
		rds_message_make_checksum(&rm->m_inc.i_hdr);

		/*
		 * Update adv_credits since we reset the ACK_REQUIRED bit.
		 */
		if (ic->i_flowctl) {
			rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
			adv_credits += posted;
			BUG_ON(adv_credits > 255);
		}
	}

	/* Sometimes you want to put a fence between an RDMA
	 * READ and the following SEND.
	 * We could either do this all the time
	 * or when requested by the user. Right now, we let
	 * the application choose.
	 */
	if (rm->rdma.op_active && rm->rdma.op_fence)
		send_flags = IB_SEND_FENCE;

	/* Each frag gets a header. Msgs may be 0 bytes */
	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &ic->i_data_op->op_sg[sg];
	i = 0;
	do {
		unsigned int len = 0;

		/* Set up the header */
		send->s_wr.send_flags = send_flags;
		send->s_wr.opcode = IB_WR_SEND;
		send->s_wr.num_sge = 1;
		send->s_wr.next = NULL;
		send->s_queued = jiffies;
		send->s_op = NULL;

		send->s_sge[0].addr = ic->i_send_hdrs_dma
			+ (pos * sizeof(struct rds_header));
		send->s_sge[0].length = sizeof(struct rds_header);

		memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));

		/* Set up the data, if present */
		if (i < work_alloc
		    && scat != &rm->data.op_sg[rm->data.op_count]) {
			len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
			send->s_wr.num_sge = 2;

			send->s_sge[1].addr = ib_sg_dma_address(dev, scat) + off;
			send->s_sge[1].length = len;

			bytes_sent += len;
			off += len;
			if (off == ib_sg_dma_len(dev, scat)) {
				scat++;
				off = 0;
			}
		}

		rds_ib_set_wr_signal_state(ic, send, 0);

		/*
		 * Always signal the last one if we're stopping due to flow control.
		 */
		if (ic->i_flowctl && flow_controlled && i == (work_alloc-1))
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;

		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
			nr_sig++;

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		if (ic->i_flowctl && adv_credits) {
			struct rds_header *hdr = &ic->i_send_hdrs[pos];

			/* add credit and redo the header checksum */
			hdr->h_credit = adv_credits;
			rds_message_make_checksum(hdr);
			adv_credits = 0;
			rds_ib_stats_inc(s_ib_tx_credit_updates);
		}

		if (prev)
			prev->s_wr.next = &send->s_wr;
		prev = send;

		pos = (pos + 1) % ic->i_send_ring.w_nr;
		send = &ic->i_sends[pos];
		i++;

	} while (i < work_alloc
		 && scat != &rm->data.op_sg[rm->data.op_count]);

	/* Account the RDS header in the number of bytes we sent, but just once.
	 * The caller has no concept of fragmentation. */
	if (hdr_off == 0)
		bytes_sent += sizeof(struct rds_header);

	/* if we finished the message then send completion owns it */
	if (scat == &rm->data.op_sg[rm->data.op_count]) {
		prev->s_op = ic->i_data_op;
		prev->s_wr.send_flags |= IB_SEND_SOLICITED;
		ic->i_data_op = NULL;
	}

	/* Put back wrs & credits we didn't use */
	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}
	if (ic->i_flowctl && i < credit_alloc)
		rds_ib_send_add_credits(conn, credit_alloc - i);

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	/* XXX need to worry about failed_wr and partial sends. */
	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		if (prev->s_op) {
			ic->i_data_op = prev->s_op;
			prev->s_op = NULL;
		}

		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
		goto out;
	}

	ret = bytes_sent;
out:
	BUG_ON(adv_credits);
	return ret;
}

/*
 * Issue atomic operation.
 * A simplified version of the rdma case: we always map a single SG of
 * just 8 bytes, which receives the return value of the atomic operation.
 */
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct ib_send_wr *failed_wr;
	struct rds_ib_device *rds_ibdev;
	u32 pos;
	u32 work_alloc;
	int ret;
	int nr_sig = 0;

	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
	if (work_alloc != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	/* address of send request in ring */
	send = &ic->i_sends[pos];
	send->s_queued = jiffies;

	if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
		send->s_wr.opcode = IB_WR_ATOMIC_CMP_AND_SWP;
		send->s_wr.wr.atomic.compare_add = op->op_compare;
		send->s_wr.wr.atomic.swap = op->op_swap_add;
	} else { /* FADD */
		send->s_wr.opcode = IB_WR_ATOMIC_FETCH_AND_ADD;
		send->s_wr.wr.atomic.compare_add = op->op_swap_add;
		send->s_wr.wr.atomic.swap = 0;
	}
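
	/* Note: for IB_WR_ATOMIC_FETCH_AND_ADD the compare_add field carries
	 * the value to add and swap is ignored (standard IB verbs semantics),
	 * which is why op_swap_add lands in compare_add in the FADD branch
	 * above. */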
	nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
	send->s_wr.num_sge = 1;
	send->s_wr.next = NULL;
	send->s_wr.wr.atomic.remote_addr = op->op_remote_addr;
	send->s_wr.wr.atomic.rkey = op->op_rkey;
	send->s_op = op;
	rds_message_addref(container_of(send->s_op, struct rds_message, atomic));

	/* map 8 byte retval buffer to the device */
	ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
	rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
	if (ret != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
		ret = -ENOMEM; /* XXX ? */
		goto out;
	}

	/* Convert our struct scatterlist to struct ib_sge */
	send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
	send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
	send->s_sge[0].lkey = ic->i_mr->lkey;

	rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
		 send->s_sge[0].addr, send->s_sge[0].length);

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	failed_wr = &send->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &send->s_wr, &failed_wr);
	rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
		 send, &send->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &send->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		goto out;
	}

	if (unlikely(failed_wr != &send->s_wr)) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &send->s_wr);
	}

out:
	return ret;
}

int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	unsigned long len;
	u64 remote_addr = op->op_remote_addr;
	u32 max_sge = ic->rds_ibdev->max_sge;
	u32 pos;
	u32 work_alloc;
	u32 i;
	u32 j;
	int sent;
	int ret;
	int num_sge;
	int nr_sig = 0;

	/* map the op the first time we see it */
	if (!op->op_mapped) {
		op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
					     op->op_sg, op->op_nents, (op->op_write) ?
					     DMA_TO_DEVICE : DMA_FROM_DEVICE);
		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
		if (op->op_count == 0) {
			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
			ret = -ENOMEM; /* XXX ? */
			goto out;
		}

		op->op_mapped = 1;
	}

	/*
	 * Instead of knowing how to return a partial rdma read/write, we insist that there
	 * be enough work requests to send the entire message.
	 */
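	/*
	 * (A sketch of the split, assuming for illustration max_sge == 32:
	 * an op_count of 70 mapped SGEs becomes ceil(70, 32) = 3 work
	 * requests carrying 32, 32 and 6 SGEs respectively.)
	 */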
	i = ceil(op->op_count, max_sge);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc != i) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &op->op_sg[0];
	sent = 0;
	num_sge = op->op_count;

	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
		send->s_wr.send_flags = 0;
		send->s_queued = jiffies;
		send->s_op = NULL;

		nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify);

		send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
		send->s_wr.wr.rdma.remote_addr = remote_addr;
		send->s_wr.wr.rdma.rkey = op->op_rkey;

		if (num_sge > max_sge) {
			send->s_wr.num_sge = max_sge;
			num_sge -= max_sge;
		} else {
			send->s_wr.num_sge = num_sge;
		}

		send->s_wr.next = NULL;

		if (prev)
			prev->s_wr.next = &send->s_wr;

		for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) {
			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
			send->s_sge[j].addr =
				 ib_sg_dma_address(ic->i_cm_id->device, scat);
			send->s_sge[j].length = len;
			send->s_sge[j].lkey = ic->i_mr->lkey;

			sent += len;
			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);

			remote_addr += len;
			scat++;
		}

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			&send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		prev = send;
		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
			send = ic->i_sends;
	}

	/* give a reference to the last op */
	if (scat == &op->op_sg[op->op_count]) {
		prev->s_op = op;
		rds_message_addref(container_of(op, struct rds_message, rdma));
	}

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		goto out;
	}

	if (unlikely(failed_wr != &first->s_wr)) {
		printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &first->s_wr);
	}


out:
	return ret;
}

void rds_ib_xmit_complete(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* We may have a pending ACK or window update we were unable
	 * to send previously (due to flow control). Try again. */
	rds_ib_attempt_ack(ic);
}