/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_cq.c 1369 2004-12-20 16:17:07Z roland $
 */

#include <linux/hardirq.h>
#include <linux/sched.h>

#include <asm/io.h>

#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"

enum {
	MTHCA_MAX_DIRECT_CQ_SIZE = 4 * PAGE_SIZE
};

enum {
	MTHCA_CQ_ENTRY_SIZE = 0x20
};

enum {
	MTHCA_ATOMIC_BYTE_LEN = 8
};

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_cq_context {
	__be32 flags;
	__be64 start;
	__be32 logsize_usrpage;
	__be32 error_eqn;	/* Tavor only */
	__be32 comp_eqn;
	__be32 pd;
	__be32 lkey;
	__be32 last_notified_index;
	__be32 solicit_producer_index;
	__be32 consumer_index;
	__be32 producer_index;
	__be32 cqn;
	__be32 ci_db;		/* Arbel only */
	__be32 state_db;	/* Arbel only */
	u32    reserved;
} __attribute__((packed));

#define MTHCA_CQ_STATUS_OK          ( 0 << 28)
#define MTHCA_CQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_CQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_CQ_FLAG_TR            ( 1 << 18)
#define MTHCA_CQ_FLAG_OI            ( 1 << 17)
#define MTHCA_CQ_STATE_DISARMED     ( 0 <<  8)
#define MTHCA_CQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_CQ_STATE_ARMED_SOL    ( 4 <<  8)
#define MTHCA_EQ_STATE_FIRED        (10 <<  8)

enum {
	MTHCA_ERROR_CQE_OPCODE_MASK = 0xfe
};

enum {
	SYNDROME_LOCAL_LENGTH_ERR 	 = 0x01,
	SYNDROME_LOCAL_QP_OP_ERR  	 = 0x02,
	SYNDROME_LOCAL_EEC_OP_ERR 	 = 0x03,
	SYNDROME_LOCAL_PROT_ERR   	 = 0x04,
	SYNDROME_WR_FLUSH_ERR     	 = 0x05,
	SYNDROME_MW_BIND_ERR      	 = 0x06,
	SYNDROME_BAD_RESP_ERR     	 = 0x10,
	SYNDROME_LOCAL_ACCESS_ERR 	 = 0x11,
	SYNDROME_REMOTE_INVAL_REQ_ERR 	 = 0x12,
	SYNDROME_REMOTE_ACCESS_ERR 	 = 0x13,
	SYNDROME_REMOTE_OP_ERR     	 = 0x14,
	SYNDROME_RETRY_EXC_ERR 		 = 0x15,
	SYNDROME_RNR_RETRY_EXC_ERR 	 = 0x16,
	SYNDROME_LOCAL_RDD_VIOL_ERR 	 = 0x20,
	SYNDROME_REMOTE_INVAL_RD_REQ_ERR = 0x21,
	SYNDROME_REMOTE_ABORTED_ERR 	 = 0x22,
	SYNDROME_INVAL_EECN_ERR 	 = 0x23,
	SYNDROME_INVAL_EEC_STATE_ERR 	 = 0x24
};

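/*
 * 32-byte completion queue entry as written by the HCA.  struct
 * mthca_err_cqe below overlays the same bytes for completions with
 * error; bit 7 of the final (owner) byte says whether the entry is
 * owned by hardware or software.
 */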
struct mthca_cqe {
	__be32 my_qpn;
	__be32 my_ee;
	__be32 rqpn;
	__be16 sl_g_mlpath;
	__be16 rlid;
	__be32 imm_etype_pkey_eec;
	__be32 byte_cnt;
	__be32 wqe;
	u8     opcode;
	u8     is_send;
	u8     reserved;
	u8     owner;
};

struct mthca_err_cqe {
	__be32 my_qpn;
	u32    reserved1[3];
	u8     syndrome;
	u8     vendor_err;
	__be16 db_cnt;
	u32    reserved2;
	__be32 wqe;
	u8     opcode;
	u8     reserved3[2];
	u8     owner;
};

#define MTHCA_CQ_ENTRY_OWNER_SW      (0 << 7)
#define MTHCA_CQ_ENTRY_OWNER_HW      (1 << 7)

#define MTHCA_TAVOR_CQ_DB_INC_CI       (1 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL  (3 << 24)
#define MTHCA_TAVOR_CQ_DB_SET_CI       (4 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_MULT (5 << 24)

#define MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL  (1 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24)

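/*
 * A CQ buffer is either one contiguous ("direct") allocation or a list
 * of pages; look up the entry'th CQE accordingly.
 */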
static inline struct mthca_cqe *get_cqe_from_buf(struct mthca_cq_buf *buf,
						 int entry)
{
	if (buf->is_direct)
		return buf->queue.direct.buf + (entry * MTHCA_CQ_ENTRY_SIZE);
	else
		return buf->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
			+ (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE;
}

static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)
{
	return get_cqe_from_buf(&cq->buf, entry);
}

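/* Return the CQE if software owns it (HW ownership bit clear), else NULL. */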
static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe)
{
	return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
}

static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)
{
	return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));
}

static inline void set_cqe_hw(struct mthca_cqe *cqe)
{
	cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
}

static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr)
{
	__be32 *cqe = cqe_ptr;

	(void) cqe;	/* avoid warning if mthca_dbg compiled away... */
	mthca_dbg(dev, "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		  be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]),
		  be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]),
		  be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7]));
}

/*
 * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index
 * should be correct before calling update_cons_index().
 */
static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
				     int incr)
{
	__be32 doorbell[2];

	if (mthca_is_memfree(dev)) {
		*cq->set_ci_db = cpu_to_be32(cq->cons_index);
		wmb();
	} else {
		doorbell[0] = cpu_to_be32(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn);
		doorbell[1] = cpu_to_be32(incr - 1);

		mthca_write64(doorbell,
			      dev->kar + MTHCA_CQ_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		/*
		 * Make sure doorbells don't leak out of CQ spinlock
		 * and reach the HCA out of order:
		 */
		mmiowb();
	}
}

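/*
 * Called from the EQ handler on a completion event: bump the arm
 * sequence number used by mthca_arbel_arm_cq() and invoke the
 * consumer's completion callback.
 */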
void mthca_cq_completion(struct mthca_dev *dev, u32 cqn)
{
	struct mthca_cq *cq;

	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));

	if (!cq) {
		mthca_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

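/*
 * Deliver an asynchronous (error) event for a CQ.  Taking a reference
 * under cq_table.lock keeps the CQ alive while the event handler runs.
 */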
void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
		    enum ib_event_type event_type)
{
	struct mthca_cq *cq;
	struct ib_event event;

	spin_lock(&dev->cq_table.lock);

	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
	if (cq)
		++cq->refcount;

	spin_unlock(&dev->cq_table.lock);

	if (!cq) {
		mthca_warn(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.cq  = &cq->ibcq;
	if (cq->ibcq.event_handler)
		cq->ibcq.event_handler(&event, cq->ibcq.cq_context);

	spin_lock(&dev->cq_table.lock);
	if (!--cq->refcount)
		wake_up(&cq->wait);
	spin_unlock(&dev->cq_table.lock);
}

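/*
 * Error CQEs encode send vs. receive in bit 0 of the opcode;
 * successful CQEs use bit 7 of is_send.
 */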
static inline int is_recv_cqe(struct mthca_cqe *cqe)
{
	if ((cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
	    MTHCA_ERROR_CQE_OPCODE_MASK)
		return !(cqe->opcode & 0x01);
	else
		return !(cqe->is_send & 0x80);
}

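/*
 * Remove all CQEs for the given QP, which is assumed to already be in
 * RESET; receive WQEs completed from an SRQ are returned to the SRQ's
 * free list.
 */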
void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
		    struct mthca_srq *srq)
{
	struct mthca_cqe *cqe;
	u32 prod_index;
	int i, nfreed = 0;

	spin_lock_irq(&cq->lock);

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->cons_index;
	     cqe_sw(get_cqe(cq, prod_index & cq->ibcq.cqe));
	     ++prod_index)
		if (prod_index == cq->cons_index + cq->ibcq.cqe)
			break;

	if (0)
		mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
			  qpn, cq->cqn, cq->cons_index, prod_index);

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		if (cqe->my_qpn == cpu_to_be32(qpn)) {
			if (srq && is_recv_cqe(cqe))
				mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
			++nfreed;
		} else if (nfreed)
			memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe),
			       cqe, MTHCA_CQ_ENTRY_SIZE);
	}

	if (nfreed) {
		for (i = 0; i < nfreed; ++i)
			set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe));
		wmb();
		cq->cons_index += nfreed;
		update_cons_index(dev, cq, nfreed);
	}

	spin_unlock_irq(&cq->lock);
}

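/* Copy the CQEs still owned by software into the resize buffer. */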
void mthca_cq_resize_copy_cqes(struct mthca_cq *cq)
{
	int i;

	/*
	 * In Tavor mode, the hardware keeps the consumer and producer
	 * indices mod the CQ size.  Since we might be making the CQ
	 * bigger, we need to deal with the case where the producer
	 * index wrapped around before the CQ was resized.
	 */
	if (!mthca_is_memfree(to_mdev(cq->ibcq.device)) &&
	    cq->ibcq.cqe < cq->resize_buf->cqe) {
		cq->cons_index &= cq->ibcq.cqe;
		if (cqe_sw(get_cqe(cq, cq->ibcq.cqe)))
			cq->cons_index -= cq->ibcq.cqe + 1;
	}

	for (i = cq->cons_index; cqe_sw(get_cqe(cq, i & cq->ibcq.cqe)); ++i)
		memcpy(get_cqe_from_buf(&cq->resize_buf->buf,
					i & cq->resize_buf->cqe),
		       get_cqe(cq, i & cq->ibcq.cqe), MTHCA_CQ_ENTRY_SIZE);
}

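/*
 * Allocate the CQ buffer (direct or paged, depending on size) and hand
 * every entry to the hardware by setting its ownership bit.
 */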
int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent)
{
	int ret;
	int i;

	ret = mthca_buf_alloc(dev, nent * MTHCA_CQ_ENTRY_SIZE,
			      MTHCA_MAX_DIRECT_CQ_SIZE,
			      &buf->queue, &buf->is_direct,
			      &dev->driver_pd, 1, &buf->mr);
	if (ret)
		return ret;

	for (i = 0; i < nent; ++i)
		set_cqe_hw(get_cqe_from_buf(buf, i));

	return 0;
}

void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe)
{
	mthca_buf_free(dev, (cqe + 1) * MTHCA_CQ_ENTRY_SIZE, &buf->queue,
		       buf->is_direct, &buf->mr);
}

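/*
 * Convert the hardware error syndrome into an ib_wc status.  On Tavor
 * one error CQE may cover several WQEs, so the CQE is recycled as a
 * flush-in-error entry until its doorbell count is used up; *free_cqe
 * tells the caller whether the entry may be consumed.
 */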
static void handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
			     struct mthca_qp *qp, int wqe_index, int is_send,
			     struct mthca_err_cqe *cqe,
			     struct ib_wc *entry, int *free_cqe)
{
	int dbd;
	__be32 new_wqe;

	if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) {
		mthca_dbg(dev, "local QP operation err "
			  "(QPN %06x, WQE @ %08x, CQN %06x, index %d)\n",
			  be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe),
			  cq->cqn, cq->cons_index);
		dump_cqe(dev, cqe);
	}

	/*
	 * For completions in error, only work request ID, status, vendor error
	 * (and freed resource count for RD) have to be set.
	 */
	switch (cqe->syndrome) {
	case SYNDROME_LOCAL_LENGTH_ERR:
		entry->status = IB_WC_LOC_LEN_ERR;
		break;
	case SYNDROME_LOCAL_QP_OP_ERR:
		entry->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case SYNDROME_LOCAL_EEC_OP_ERR:
		entry->status = IB_WC_LOC_EEC_OP_ERR;
		break;
	case SYNDROME_LOCAL_PROT_ERR:
		entry->status = IB_WC_LOC_PROT_ERR;
		break;
	case SYNDROME_WR_FLUSH_ERR:
		entry->status = IB_WC_WR_FLUSH_ERR;
		break;
	case SYNDROME_MW_BIND_ERR:
		entry->status = IB_WC_MW_BIND_ERR;
		break;
	case SYNDROME_BAD_RESP_ERR:
		entry->status = IB_WC_BAD_RESP_ERR;
		break;
	case SYNDROME_LOCAL_ACCESS_ERR:
		entry->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case SYNDROME_REMOTE_INVAL_REQ_ERR:
		entry->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case SYNDROME_REMOTE_ACCESS_ERR:
		entry->status = IB_WC_REM_ACCESS_ERR;
		break;
	case SYNDROME_REMOTE_OP_ERR:
		entry->status = IB_WC_REM_OP_ERR;
		break;
	case SYNDROME_RETRY_EXC_ERR:
		entry->status = IB_WC_RETRY_EXC_ERR;
		break;
	case SYNDROME_RNR_RETRY_EXC_ERR:
		entry->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case SYNDROME_LOCAL_RDD_VIOL_ERR:
		entry->status = IB_WC_LOC_RDD_VIOL_ERR;
		break;
	case SYNDROME_REMOTE_INVAL_RD_REQ_ERR:
		entry->status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case SYNDROME_REMOTE_ABORTED_ERR:
		entry->status = IB_WC_REM_ABORT_ERR;
		break;
	case SYNDROME_INVAL_EECN_ERR:
		entry->status = IB_WC_INV_EECN_ERR;
		break;
	case SYNDROME_INVAL_EEC_STATE_ERR:
		entry->status = IB_WC_INV_EEC_STATE_ERR;
		break;
	default:
		entry->status = IB_WC_GENERAL_ERR;
		break;
	}

	entry->vendor_err = cqe->vendor_err;

	/*
	 * Mem-free HCAs always generate one CQE per WQE, even in the
	 * error case, so we don't have to check the doorbell count, etc.
	 */
	if (mthca_is_memfree(dev))
		return;

	mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);

	/*
	 * If we're at the end of the WQE chain, or we've used up our
	 * doorbell count, free the CQE.  Otherwise just update it for
	 * the next poll operation.
	 */
	if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
		return;

	cqe->db_cnt   = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd);
	cqe->wqe      = new_wqe;
	cqe->syndrome = SYNDROME_WR_FLUSH_ERR;

	*free_cqe = 0;
}

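/*
 * Process one CQE: look up the QP it belongs to, recover the work
 * request ID, advance the work queue's tail, and fill in the ib_wc
 * entry.  Returns -EAGAIN if no software-owned CQE is available.
 */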
static inline int mthca_poll_one(struct mthca_dev *dev,
				 struct mthca_cq *cq,
				 struct mthca_qp **cur_qp,
				 int *freed,
				 struct ib_wc *entry)
{
	struct mthca_wq *wq;
	struct mthca_cqe *cqe;
	int wqe_index;
	int is_error;
	int is_send;
	int free_cqe = 1;
	int err = 0;

	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	if (0) {
		mthca_dbg(dev, "%x/%d: CQE -> QPN %06x, WQE @ %08x\n",
			  cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
			  be32_to_cpu(cqe->wqe));
		dump_cqe(dev, cqe);
	}

	is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
		MTHCA_ERROR_CQE_OPCODE_MASK;
	is_send  = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80;

	if (!*cur_qp || be32_to_cpu(cqe->my_qpn) != (*cur_qp)->qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		*cur_qp = mthca_array_get(&dev->qp_table.qp,
					  be32_to_cpu(cqe->my_qpn) &
					  (dev->limits.num_qps - 1));
		if (!*cur_qp) {
			mthca_warn(dev, "CQ entry for unknown QP %06x\n",
				   be32_to_cpu(cqe->my_qpn) & 0xffffff);
			err = -EINVAL;
			goto out;
		}
	}


	entry->qp = &(*cur_qp)->ibqp;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)
			     >> wq->wqe_shift);
		entry->wr_id = (*cur_qp)->wrid[wqe_index +
					       (*cur_qp)->rq.max];
	} else if ((*cur_qp)->ibqp.srq) {
		struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq);
		u32 wqe = be32_to_cpu(cqe->wqe);
		wq = NULL;
		wqe_index = wqe >> srq->wqe_shift;
		entry->wr_id = srq->wrid[wqe_index];
		mthca_free_srq_wqe(srq, wqe);
	} else {
		s32 wqe;
		wq = &(*cur_qp)->rq;
		wqe = be32_to_cpu(cqe->wqe);
		wqe_index = wqe >> wq->wqe_shift;
		/*
		 * WQE addr == base - 1 might be reported in receive completion
		 * with error instead of (rq size - 1) by Sinai FW 1.0.800 and
		 * Arbel FW 5.1.400.  This bug should be fixed in later FW revs.
		 */
		if (unlikely(wqe_index < 0))
			wqe_index = wq->max - 1;
		entry->wr_id = (*cur_qp)->wrid[wqe_index];
	}

	if (wq) {
		if (wq->last_comp < wqe_index)
			wq->tail += wqe_index - wq->last_comp;
		else
			wq->tail += wqe_index + wq->max - wq->last_comp;

		wq->last_comp = wqe_index;
	}

	if (is_error) {
		handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
				 (struct mthca_err_cqe *) cqe,
				 entry, &free_cqe);
	}

	if (is_send) {
		entry->wc_flags = 0;
		switch (cqe->opcode) {
		case MTHCA_OPCODE_RDMA_WRITE:
			entry->opcode    = IB_WC_RDMA_WRITE;
			break;
		case MTHCA_OPCODE_RDMA_WRITE_IMM:
			entry->opcode    = IB_WC_RDMA_WRITE;
			entry->wc_flags |= IB_WC_WITH_IMM;
			break;
		case MTHCA_OPCODE_SEND:
			entry->opcode    = IB_WC_SEND;
			break;
		case MTHCA_OPCODE_SEND_IMM:
			entry->opcode    = IB_WC_SEND;
			entry->wc_flags |= IB_WC_WITH_IMM;
			break;
		case MTHCA_OPCODE_RDMA_READ:
			entry->opcode    = IB_WC_RDMA_READ;
			entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MTHCA_OPCODE_ATOMIC_CS:
			entry->opcode    = IB_WC_COMP_SWAP;
			entry->byte_len  = MTHCA_ATOMIC_BYTE_LEN;
			break;
		case MTHCA_OPCODE_ATOMIC_FA:
			entry->opcode    = IB_WC_FETCH_ADD;
			entry->byte_len  = MTHCA_ATOMIC_BYTE_LEN;
			break;
		case MTHCA_OPCODE_BIND_MW:
			entry->opcode    = IB_WC_BIND_MW;
			break;
		default:
			entry->opcode    = MTHCA_OPCODE_INVALID;
			break;
		}
	} else {
		entry->byte_len = be32_to_cpu(cqe->byte_cnt);
		switch (cqe->opcode & 0x1f) {
		case IB_OPCODE_SEND_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode = IB_WC_RECV;
			break;
		case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			break;
		default:
			entry->wc_flags = 0;
			entry->opcode = IB_WC_RECV;
			break;
		}
		entry->slid 	   = be16_to_cpu(cqe->rlid);
		entry->sl   	   = be16_to_cpu(cqe->sl_g_mlpath) >> 12;
		entry->src_qp 	   = be32_to_cpu(cqe->rqpn) & 0xffffff;
		entry->dlid_path_bits = be16_to_cpu(cqe->sl_g_mlpath) & 0x7f;
		entry->pkey_index  = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16;
		entry->wc_flags   |= be16_to_cpu(cqe->sl_g_mlpath) & 0x80 ?
					IB_WC_GRH : 0;
	}

	entry->status = IB_WC_SUCCESS;

 out:
	if (likely(free_cqe)) {
		set_cqe_hw(cqe);
		++(*freed);
		++cq->cons_index;
	}

	return err;
}

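/*
 * Poll for up to num_entries completions.  If a resize is pending and
 * the old buffer drains empty, polling switches to the new buffer.
 */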
int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
		  struct ib_wc *entry)
{
	struct mthca_dev *dev = to_mdev(ibcq->device);
	struct mthca_cq *cq = to_mcq(ibcq);
	struct mthca_qp *qp = NULL;
	unsigned long flags;
	int err = 0;
	int freed = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);

	npolled = 0;
repoll:
	while (npolled < num_entries) {
		err = mthca_poll_one(dev, cq, &qp,
				     &freed, entry + npolled);
		if (err)
			break;
		++npolled;
	}

	if (freed) {
		wmb();
		update_cons_index(dev, cq, freed);
	}

	/*
	 * If a CQ resize is in progress and we discovered that the
	 * old buffer is empty, then peek in the new buffer, and if
	 * it's not empty, switch to the new buffer and continue
	 * polling there.
	 */
	if (unlikely(err == -EAGAIN && cq->resize_buf &&
		     cq->resize_buf->state == CQ_RESIZE_READY)) {
		/*
		 * In Tavor mode, the hardware keeps the producer
		 * index modulo the CQ size.  Since we might be making
		 * the CQ bigger, we need to mask our consumer index
		 * using the size of the old CQ buffer before looking
		 * in the new CQ buffer.
		 */
		if (!mthca_is_memfree(dev))
			cq->cons_index &= cq->ibcq.cqe;

		if (cqe_sw(get_cqe_from_buf(&cq->resize_buf->buf,
					    cq->cons_index & cq->resize_buf->cqe))) {
			struct mthca_cq_buf tbuf;
			int tcqe;

			tbuf         = cq->buf;
			tcqe         = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			cq->resize_buf->buf   = tbuf;
			cq->resize_buf->cqe   = tcqe;
			cq->resize_buf->state = CQ_RESIZE_SWAPPED;

			goto repoll;
		}
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return err == 0 || err == -EAGAIN ? npolled : err;
}

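/*
 * Arm the CQ on Tavor by ringing the request-notification doorbell
 * over PCI MMIO.
 */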
int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags)
{
	__be32 doorbell[2];

	doorbell[0] = cpu_to_be32(((flags & IB_CQ_SOLICITED_MASK) ==
				   IB_CQ_SOLICITED ?
				   MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
				   MTHCA_TAVOR_CQ_DB_REQ_NOT)      |
				  to_mcq(cq)->cqn);
	doorbell[1] = (__force __be32) 0xffffffff;

	mthca_write64(doorbell,
		      to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock));

	return 0;
}

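/*
 * Arm the CQ on Arbel (mem-free): write the arm doorbell record in
 * host memory first, then ring the MMIO doorbell.  The sequence number
 * carried in both keeps rearms ordered across completion events.
 */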
int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mthca_cq *cq = to_mcq(ibcq);
	__be32 doorbell[2];
	u32 sn;
	__be32 ci;

	sn = cq->arm_sn & 3;
	ci = cpu_to_be32(cq->cons_index);

	doorbell[0] = ci;
	doorbell[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) |
				  ((flags & IB_CQ_SOLICITED_MASK) ==
				   IB_CQ_SOLICITED ? 1 : 2));

	mthca_write_db_rec(doorbell, cq->arm_db);

	/*
	 * Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO.
	 */
	wmb();

	doorbell[0] = cpu_to_be32((sn << 28)                       |
				  ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
				   MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL :
				   MTHCA_ARBEL_CQ_DB_REQ_NOT)      |
				  cq->cqn);
	doorbell[1] = ci;

	mthca_write64(doorbell,
		      to_mdev(ibcq->device)->kar + MTHCA_CQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->doorbell_lock));

	return 0;
}

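/*
 * Create a CQ with nent entries (a power of two): allocate the CQN
 * and, for kernel CQs, the buffer and doorbell records, then hand the
 * CQ context over to the HCA with SW2HW_CQ.
 */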
int mthca_init_cq(struct mthca_dev *dev, int nent,
		  struct mthca_ucontext *ctx, u32 pdn,
		  struct mthca_cq *cq)
{
	struct mthca_mailbox *mailbox;
	struct mthca_cq_context *cq_context;
	int err = -ENOMEM;
	u8 status;

	cq->ibcq.cqe  = nent - 1;
	cq->is_kernel = !ctx;

	cq->cqn = mthca_alloc(&dev->cq_table.alloc);
	if (cq->cqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->cq_table.table, cq->cqn);
		if (err)
			goto err_out;

		if (cq->is_kernel) {
			cq->arm_sn = 1;

			err = -ENOMEM;

			cq->set_ci_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_SET_CI,
							     cq->cqn, &cq->set_ci_db);
			if (cq->set_ci_db_index < 0)
				goto err_out_icm;

			cq->arm_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM,
							  cq->cqn, &cq->arm_db);
			if (cq->arm_db_index < 0)
				goto err_out_ci;
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		goto err_out_arm;

	cq_context = mailbox->buf;

	if (cq->is_kernel) {
		err = mthca_alloc_cq_buf(dev, &cq->buf, nent);
		if (err)
			goto err_out_mailbox;
	}

	spin_lock_init(&cq->lock);
	cq->refcount = 1;
	init_waitqueue_head(&cq->wait);
	mutex_init(&cq->mutex);

	memset(cq_context, 0, sizeof *cq_context);
	cq_context->flags           = cpu_to_be32(MTHCA_CQ_STATUS_OK      |
						  MTHCA_CQ_STATE_DISARMED |
						  MTHCA_CQ_FLAG_TR);
	cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
	if (ctx)
		cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index);
	else
		cq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
	cq_context->error_eqn       = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
	cq_context->comp_eqn        = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);
	cq_context->pd              = cpu_to_be32(pdn);
	cq_context->lkey            = cpu_to_be32(cq->buf.mr.ibmr.lkey);
	cq_context->cqn             = cpu_to_be32(cq->cqn);

	if (mthca_is_memfree(dev)) {
		cq_context->ci_db    = cpu_to_be32(cq->set_ci_db_index);
		cq_context->state_db = cpu_to_be32(cq->arm_db_index);
	}

	err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status);
	if (err) {
		mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err);
		goto err_out_free_mr;
	}

	if (status) {
		mthca_warn(dev, "SW2HW_CQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_mr;
	}

	spin_lock_irq(&dev->cq_table.lock);
	if (mthca_array_set(&dev->cq_table.cq,
			    cq->cqn & (dev->limits.num_cqs - 1),
			    cq)) {
		spin_unlock_irq(&dev->cq_table.lock);
		goto err_out_free_mr;
	}
	spin_unlock_irq(&dev->cq_table.lock);

	cq->cons_index = 0;

	mthca_free_mailbox(dev, mailbox);

	return 0;

err_out_free_mr:
	if (cq->is_kernel)
		mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_arm:
	if (cq->is_kernel && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);

err_out_ci:
	if (cq->is_kernel && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);

err_out_icm:
	mthca_table_put(dev, dev->cq_table.table, cq->cqn);

err_out:
	mthca_free(&dev->cq_table.alloc, cq->cqn);

	return err;
}

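/* Read the current refcount under the table lock so the value is stable. */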
static inline int get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq)
{
	int c;

	spin_lock_irq(&dev->cq_table.lock);
	c = cq->refcount;
	spin_unlock_irq(&dev->cq_table.lock);

	return c;
}

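/*
 * Destroy a CQ: reclaim it from the HCA with HW2SW_CQ, remove it from
 * the CQ table, wait for any remaining users (including interrupt
 * handlers) to drop their references, then free buffer and doorbells.
 */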
void mthca_free_cq(struct mthca_dev *dev,
		   struct mthca_cq *cq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free CQ.\n");
		return;
	}

	err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status);

	if (0) {
		__be32 *ctx = mailbox->buf;
		int j;

		printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
		       cq->cqn, cq->cons_index,
		       cq->is_kernel ? !!next_cqe_sw(cq) : 0);
		for (j = 0; j < 16; ++j)
			printk(KERN_ERR "[%2x] %08x\n", j * 4, be32_to_cpu(ctx[j]));
	}

	spin_lock_irq(&dev->cq_table.lock);
	mthca_array_clear(&dev->cq_table.cq,
			  cq->cqn & (dev->limits.num_cqs - 1));
	--cq->refcount;
	spin_unlock_irq(&dev->cq_table.lock);

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X)
		synchronize_irq(dev->eq_table.eq[MTHCA_EQ_COMP].msi_x_vector);
	else
		synchronize_irq(dev->pdev->irq);

	wait_event(cq->wait, !get_cq_refcount(dev, cq));

	if (cq->is_kernel) {
		mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
		if (mthca_is_memfree(dev)) {
			mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM,    cq->arm_db_index);
			mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
		}
	}

	mthca_table_put(dev, dev->cq_table.table, cq->cqn);
	mthca_free(&dev->cq_table.alloc, cq->cqn);
	mthca_free_mailbox(dev, mailbox);
}

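/* Set up the CQN allocator and the CQN -> mthca_cq lookup array. */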
int mthca_init_cq_table(struct mthca_dev *dev)
{
	int err;

	spin_lock_init(&dev->cq_table.lock);

	err = mthca_alloc_init(&dev->cq_table.alloc,
			       dev->limits.num_cqs,
			       (1 << 24) - 1,
			       dev->limits.reserved_cqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->cq_table.cq,
			       dev->limits.num_cqs);
	if (err)
		mthca_alloc_cleanup(&dev->cq_table.alloc);

	return err;
}

void mthca_cleanup_cq_table(struct mthca_dev *dev)
{
	mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs);
	mthca_alloc_cleanup(&dev->cq_table.alloc);
}