/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_cq.c 1369 2004-12-20 16:17:07Z roland $
 */

#include <linux/hardirq.h>

#include <asm/io.h>

#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"

enum {
	MTHCA_MAX_DIRECT_CQ_SIZE = 4 * PAGE_SIZE
};
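/*
 * CQs up to MTHCA_MAX_DIRECT_CQ_SIZE bytes are backed by one
 * physically contiguous buffer; larger CQs use a page list, which is
 * why get_cqe_from_buf() has two cases.
 */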

enum {
	MTHCA_CQ_ENTRY_SIZE = 0x20
};

enum {
	MTHCA_ATOMIC_BYTE_LEN = 8
};

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_cq_context {
	__be32 flags;
	__be64 start;
	__be32 logsize_usrpage;
	__be32 error_eqn;	/* Tavor only */
	__be32 comp_eqn;
	__be32 pd;
	__be32 lkey;
	__be32 last_notified_index;
	__be32 solicit_producer_index;
	__be32 consumer_index;
	__be32 producer_index;
	__be32 cqn;
	__be32 ci_db;		/* Arbel only */
	__be32 state_db;	/* Arbel only */
	u32    reserved;
} __attribute__((packed));

#define MTHCA_CQ_STATUS_OK          ( 0 << 28)
#define MTHCA_CQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_CQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_CQ_FLAG_TR            ( 1 << 18)
#define MTHCA_CQ_FLAG_OI            ( 1 << 17)
#define MTHCA_CQ_STATE_DISARMED     ( 0 <<  8)
#define MTHCA_CQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_CQ_STATE_ARMED_SOL    ( 4 <<  8)
#define MTHCA_EQ_STATE_FIRED        (10 <<  8)

enum {
	MTHCA_ERROR_CQE_OPCODE_MASK = 0xfe
};

enum {
	SYNDROME_LOCAL_LENGTH_ERR 	 = 0x01,
	SYNDROME_LOCAL_QP_OP_ERR  	 = 0x02,
	SYNDROME_LOCAL_EEC_OP_ERR 	 = 0x03,
	SYNDROME_LOCAL_PROT_ERR   	 = 0x04,
	SYNDROME_WR_FLUSH_ERR     	 = 0x05,
	SYNDROME_MW_BIND_ERR      	 = 0x06,
	SYNDROME_BAD_RESP_ERR     	 = 0x10,
	SYNDROME_LOCAL_ACCESS_ERR 	 = 0x11,
	SYNDROME_REMOTE_INVAL_REQ_ERR 	 = 0x12,
	SYNDROME_REMOTE_ACCESS_ERR 	 = 0x13,
	SYNDROME_REMOTE_OP_ERR     	 = 0x14,
	SYNDROME_RETRY_EXC_ERR 		 = 0x15,
	SYNDROME_RNR_RETRY_EXC_ERR 	 = 0x16,
	SYNDROME_LOCAL_RDD_VIOL_ERR 	 = 0x20,
	SYNDROME_REMOTE_INVAL_RD_REQ_ERR = 0x21,
	SYNDROME_REMOTE_ABORTED_ERR 	 = 0x22,
	SYNDROME_INVAL_EECN_ERR 	 = 0x23,
	SYNDROME_INVAL_EEC_STATE_ERR 	 = 0x24
};

struct mthca_cqe {
	__be32 my_qpn;
	__be32 my_ee;
	__be32 rqpn;
	__be16 sl_g_mlpath;
	__be16 rlid;
	__be32 imm_etype_pkey_eec;
	__be32 byte_cnt;
	__be32 wqe;
	u8     opcode;
	u8     is_send;
	u8     reserved;
	u8     owner;
};

struct mthca_err_cqe {
	__be32 my_qpn;
	u32    reserved1[3];
	u8     syndrome;
	u8     vendor_err;
	__be16 db_cnt;
	u32    reserved2;
	__be32 wqe;
	u8     opcode;
	u8     reserved3[2];
	u8     owner;
};

#define MTHCA_CQ_ENTRY_OWNER_SW      (0 << 7)
#define MTHCA_CQ_ENTRY_OWNER_HW      (1 << 7)
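/*
 * Bit 7 of the owner byte tells who owns a CQE: set means the HCA owns
 * it.  cqe_sw() returns NULL for HW-owned entries, and set_cqe_hw()
 * hands a polled entry back to the hardware.
 */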

#define MTHCA_TAVOR_CQ_DB_INC_CI       (1 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL  (3 << 24)
#define MTHCA_TAVOR_CQ_DB_SET_CI       (4 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_MULT (5 << 24)

#define MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL  (1 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24)

static inline struct mthca_cqe *get_cqe_from_buf(struct mthca_cq_buf *buf,
						 int entry)
{
	if (buf->is_direct)
		return buf->queue.direct.buf + (entry * MTHCA_CQ_ENTRY_SIZE);
	else
		return buf->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
			+ (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE;
}

static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)
{
	return get_cqe_from_buf(&cq->buf, entry);
}

static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe)
{
	return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
}

static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)
{
	return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));
}

static inline void set_cqe_hw(struct mthca_cqe *cqe)
{
	cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
}

static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr)
{
	__be32 *cqe = cqe_ptr;

	(void) cqe;	/* avoid warning if mthca_dbg compiled away... */
	mthca_dbg(dev, "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		  be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]),
		  be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]),
		  be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7]));
}

/*
 * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index
 * should be correct before calling update_cons_index().
 */
static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
				     int incr)
{
	__be32 doorbell[2];

	if (mthca_is_memfree(dev)) {
		*cq->set_ci_db = cpu_to_be32(cq->cons_index);
		wmb();
	} else {
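		/*
		 * Tavor: ring the "increment CI" doorbell; the count
		 * field holds incr - 1.
		 */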
		doorbell[0] = cpu_to_be32(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn);
		doorbell[1] = cpu_to_be32(incr - 1);

		mthca_write64(doorbell,
			      dev->kar + MTHCA_CQ_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		/*
		 * Make sure doorbells don't leak out of CQ spinlock
		 * and reach the HCA out of order:
		 */
		mmiowb();
	}
}

void mthca_cq_completion(struct mthca_dev *dev, u32 cqn)
{
	struct mthca_cq *cq;

	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));

	if (!cq) {
		mthca_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
		    enum ib_event_type event_type)
{
	struct mthca_cq *cq;
	struct ib_event event;

	spin_lock(&dev->cq_table.lock);

	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
	if (cq)
		++cq->refcount;

	spin_unlock(&dev->cq_table.lock);

	if (!cq) {
		mthca_warn(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.cq  = &cq->ibcq;
	if (cq->ibcq.event_handler)
		cq->ibcq.event_handler(&event, cq->ibcq.cq_context);

	spin_lock(&dev->cq_table.lock);
	if (!--cq->refcount)
		wake_up(&cq->wait);
	spin_unlock(&dev->cq_table.lock);
}

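/*
 * Error CQEs (opcode matching MTHCA_ERROR_CQE_OPCODE_MASK) encode send
 * vs. receive in bit 0 of the opcode; successful CQEs use bit 7 of
 * is_send instead.
 */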
static inline int is_recv_cqe(struct mthca_cqe *cqe)
{
	if ((cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
	    MTHCA_ERROR_CQE_OPCODE_MASK)
		return !(cqe->opcode & 0x01);
	else
		return !(cqe->is_send & 0x80);
}

void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
		    struct mthca_srq *srq)
{
	struct mthca_cqe *cqe;
	u32 prod_index;
	int nfreed = 0;

	spin_lock_irq(&cq->lock);

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->cons_index;
	     cqe_sw(get_cqe(cq, prod_index & cq->ibcq.cqe));
	     ++prod_index)
		if (prod_index == cq->cons_index + cq->ibcq.cqe)
			break;

	if (0)
		mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
			  qpn, cq->cqn, cq->cons_index, prod_index);

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
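	/* The signed subtraction below is correct even if prod_index has wrapped. */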
	while ((int) --prod_index - (int) cq->cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		if (cqe->my_qpn == cpu_to_be32(qpn)) {
			if (srq && is_recv_cqe(cqe))
				mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
			++nfreed;
		} else if (nfreed)
			memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe),
			       cqe, MTHCA_CQ_ENTRY_SIZE);
	}

	if (nfreed) {
		wmb();
		cq->cons_index += nfreed;
		update_cons_index(dev, cq, nfreed);
	}

	spin_unlock_irq(&cq->lock);
}

void mthca_cq_resize_copy_cqes(struct mthca_cq *cq)
{
	int i;

	/*
	 * In Tavor mode, the hardware keeps the consumer and producer
	 * indices mod the CQ size.  Since we might be making the CQ
	 * bigger, we need to deal with the case where the producer
	 * index wrapped around before the CQ was resized.
	 */
	if (!mthca_is_memfree(to_mdev(cq->ibcq.device)) &&
	    cq->ibcq.cqe < cq->resize_buf->cqe) {
		cq->cons_index &= cq->ibcq.cqe;
		if (cqe_sw(get_cqe(cq, cq->ibcq.cqe)))
			cq->cons_index -= cq->ibcq.cqe + 1;
	}

	for (i = cq->cons_index; cqe_sw(get_cqe(cq, i & cq->ibcq.cqe)); ++i)
		memcpy(get_cqe_from_buf(&cq->resize_buf->buf,
					i & cq->resize_buf->cqe),
		       get_cqe(cq, i & cq->ibcq.cqe), MTHCA_CQ_ENTRY_SIZE);
}

int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent)
{
	int ret;
	int i;

	ret = mthca_buf_alloc(dev, nent * MTHCA_CQ_ENTRY_SIZE,
			      MTHCA_MAX_DIRECT_CQ_SIZE,
			      &buf->queue, &buf->is_direct,
			      &dev->driver_pd, 1, &buf->mr);
	if (ret)
		return ret;

	for (i = 0; i < nent; ++i)
		set_cqe_hw(get_cqe_from_buf(buf, i));

	return 0;
}

void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe)
{
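	/* cqe here is ibcq.cqe, i.e. nent - 1, hence the + 1 below. */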
	mthca_buf_free(dev, (cqe + 1) * MTHCA_CQ_ENTRY_SIZE, &buf->queue,
		       buf->is_direct, &buf->mr);
}

static void handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
			     struct mthca_qp *qp, int wqe_index, int is_send,
			     struct mthca_err_cqe *cqe,
			     struct ib_wc *entry, int *free_cqe)
{
	int dbd;
	__be32 new_wqe;

	if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) {
		mthca_dbg(dev, "local QP operation err "
			  "(QPN %06x, WQE @ %08x, CQN %06x, index %d)\n",
			  be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe),
			  cq->cqn, cq->cons_index);
		dump_cqe(dev, cqe);
	}

	/*
	 * For completions in error, only work request ID, status, vendor error
	 * (and freed resource count for RD) have to be set.
	 */
	switch (cqe->syndrome) {
	case SYNDROME_LOCAL_LENGTH_ERR:
		entry->status = IB_WC_LOC_LEN_ERR;
		break;
	case SYNDROME_LOCAL_QP_OP_ERR:
		entry->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case SYNDROME_LOCAL_EEC_OP_ERR:
		entry->status = IB_WC_LOC_EEC_OP_ERR;
		break;
	case SYNDROME_LOCAL_PROT_ERR:
		entry->status = IB_WC_LOC_PROT_ERR;
		break;
	case SYNDROME_WR_FLUSH_ERR:
		entry->status = IB_WC_WR_FLUSH_ERR;
		break;
	case SYNDROME_MW_BIND_ERR:
		entry->status = IB_WC_MW_BIND_ERR;
		break;
	case SYNDROME_BAD_RESP_ERR:
		entry->status = IB_WC_BAD_RESP_ERR;
		break;
	case SYNDROME_LOCAL_ACCESS_ERR:
		entry->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case SYNDROME_REMOTE_INVAL_REQ_ERR:
		entry->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case SYNDROME_REMOTE_ACCESS_ERR:
		entry->status = IB_WC_REM_ACCESS_ERR;
		break;
	case SYNDROME_REMOTE_OP_ERR:
		entry->status = IB_WC_REM_OP_ERR;
		break;
	case SYNDROME_RETRY_EXC_ERR:
		entry->status = IB_WC_RETRY_EXC_ERR;
		break;
	case SYNDROME_RNR_RETRY_EXC_ERR:
		entry->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case SYNDROME_LOCAL_RDD_VIOL_ERR:
		entry->status = IB_WC_LOC_RDD_VIOL_ERR;
		break;
	case SYNDROME_REMOTE_INVAL_RD_REQ_ERR:
		entry->status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case SYNDROME_REMOTE_ABORTED_ERR:
		entry->status = IB_WC_REM_ABORT_ERR;
		break;
	case SYNDROME_INVAL_EECN_ERR:
		entry->status = IB_WC_INV_EECN_ERR;
		break;
	case SYNDROME_INVAL_EEC_STATE_ERR:
		entry->status = IB_WC_INV_EEC_STATE_ERR;
		break;
	default:
		entry->status = IB_WC_GENERAL_ERR;
		break;
	}

	entry->vendor_err = cqe->vendor_err;

	/*
	 * Mem-free HCAs always generate one CQE per WQE, even in the
	 * error case, so we don't have to check the doorbell count, etc.
	 */
	if (mthca_is_memfree(dev))
		return;

	mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);

	/*
	 * If we're at the end of the WQE chain, or we've used up our
	 * doorbell count, free the CQE.  Otherwise just update it for
	 * the next poll operation.
	 */
	if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
		return;

	cqe->db_cnt   = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd);
	cqe->wqe      = new_wqe;
	cqe->syndrome = SYNDROME_WR_FLUSH_ERR;

	*free_cqe = 0;
}

static inline int mthca_poll_one(struct mthca_dev *dev,
				 struct mthca_cq *cq,
				 struct mthca_qp **cur_qp,
				 int *freed,
				 struct ib_wc *entry)
{
	struct mthca_wq *wq;
	struct mthca_cqe *cqe;
	int wqe_index;
	int is_error;
	int is_send;
	int free_cqe = 1;
	int err = 0;

	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	if (0) {
		mthca_dbg(dev, "%x/%d: CQE -> QPN %06x, WQE @ %08x\n",
			  cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
			  be32_to_cpu(cqe->wqe));
		dump_cqe(dev, cqe);
	}

	is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
		MTHCA_ERROR_CQE_OPCODE_MASK;
	is_send  = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80;

	if (!*cur_qp || be32_to_cpu(cqe->my_qpn) != (*cur_qp)->qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		*cur_qp = mthca_array_get(&dev->qp_table.qp,
					  be32_to_cpu(cqe->my_qpn) &
					  (dev->limits.num_qps - 1));
		if (!*cur_qp) {
			mthca_warn(dev, "CQ entry for unknown QP %06x\n",
				   be32_to_cpu(cqe->my_qpn) & 0xffffff);
			err = -EINVAL;
			goto out;
		}
	}

	entry->qp = &(*cur_qp)->ibqp;

	if (is_send) {
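		/*
		 * Send WRIDs are stored in qp->wrid[] after the rq.max
		 * receive WRIDs, hence the offset below.
		 */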
		wq = &(*cur_qp)->sq;
		wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)
			     >> wq->wqe_shift);
		entry->wr_id = (*cur_qp)->wrid[wqe_index +
					       (*cur_qp)->rq.max];
	} else if ((*cur_qp)->ibqp.srq) {
		struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq);
		u32 wqe = be32_to_cpu(cqe->wqe);
		wq = NULL;
		wqe_index = wqe >> srq->wqe_shift;
		entry->wr_id = srq->wrid[wqe_index];
		mthca_free_srq_wqe(srq, wqe);
	} else {
		s32 wqe;
		wq = &(*cur_qp)->rq;
		wqe = be32_to_cpu(cqe->wqe);
		wqe_index = wqe >> wq->wqe_shift;
		/*
		 * WQE addr == base - 1 might be reported in receive completion
		 * with error instead of (rq size - 1) by Sinai FW 1.0.800 and
		 * Arbel FW 5.1.400.  This bug should be fixed in later FW revs.
		 */
		if (unlikely(wqe_index < 0))
			wqe_index = wq->max - 1;
		entry->wr_id = (*cur_qp)->wrid[wqe_index];
	}

	if (wq) {
		if (wq->last_comp < wqe_index)
			wq->tail += wqe_index - wq->last_comp;
		else
			wq->tail += wqe_index + wq->max - wq->last_comp;

		wq->last_comp = wqe_index;
	}

	if (is_error) {
		handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
				 (struct mthca_err_cqe *) cqe,
				 entry, &free_cqe);
		goto out;
	}

	if (is_send) {
		entry->wc_flags = 0;
		switch (cqe->opcode) {
		case MTHCA_OPCODE_RDMA_WRITE:
			entry->opcode    = IB_WC_RDMA_WRITE;
			break;
		case MTHCA_OPCODE_RDMA_WRITE_IMM:
			entry->opcode    = IB_WC_RDMA_WRITE;
			entry->wc_flags |= IB_WC_WITH_IMM;
			break;
		case MTHCA_OPCODE_SEND:
			entry->opcode    = IB_WC_SEND;
			break;
		case MTHCA_OPCODE_SEND_IMM:
			entry->opcode    = IB_WC_SEND;
			entry->wc_flags |= IB_WC_WITH_IMM;
			break;
		case MTHCA_OPCODE_RDMA_READ:
			entry->opcode    = IB_WC_RDMA_READ;
			entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MTHCA_OPCODE_ATOMIC_CS:
			entry->opcode    = IB_WC_COMP_SWAP;
			entry->byte_len  = MTHCA_ATOMIC_BYTE_LEN;
			break;
		case MTHCA_OPCODE_ATOMIC_FA:
			entry->opcode    = IB_WC_FETCH_ADD;
			entry->byte_len  = MTHCA_ATOMIC_BYTE_LEN;
			break;
		case MTHCA_OPCODE_BIND_MW:
			entry->opcode    = IB_WC_BIND_MW;
			break;
		default:
			entry->opcode    = MTHCA_OPCODE_INVALID;
			break;
		}
	} else {
		entry->byte_len = be32_to_cpu(cqe->byte_cnt);
		switch (cqe->opcode & 0x1f) {
		case IB_OPCODE_SEND_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode = IB_WC_RECV;
			break;
		case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			break;
		default:
			entry->wc_flags = 0;
			entry->opcode = IB_WC_RECV;
			break;
		}
		entry->slid 	   = be16_to_cpu(cqe->rlid);
		entry->sl   	   = be16_to_cpu(cqe->sl_g_mlpath) >> 12;
		entry->src_qp 	   = be32_to_cpu(cqe->rqpn) & 0xffffff;
		entry->dlid_path_bits = be16_to_cpu(cqe->sl_g_mlpath) & 0x7f;
		entry->pkey_index  = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16;
		entry->wc_flags   |= be16_to_cpu(cqe->sl_g_mlpath) & 0x80 ?
					IB_WC_GRH : 0;
	}

	entry->status = IB_WC_SUCCESS;

 out:
	if (likely(free_cqe)) {
		set_cqe_hw(cqe);
		++(*freed);
		++cq->cons_index;
	}

	return err;
}

int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
		  struct ib_wc *entry)
{
	struct mthca_dev *dev = to_mdev(ibcq->device);
	struct mthca_cq *cq = to_mcq(ibcq);
	struct mthca_qp *qp = NULL;
	unsigned long flags;
	int err = 0;
	int freed = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);

	npolled = 0;
repoll:
	while (npolled < num_entries) {
		err = mthca_poll_one(dev, cq, &qp,
				     &freed, entry + npolled);
		if (err)
			break;
		++npolled;
	}

	if (freed) {
		wmb();
		update_cons_index(dev, cq, freed);
	}

	/*
	 * If a CQ resize is in progress and we discovered that the
	 * old buffer is empty, then peek in the new buffer, and if
	 * it's not empty, switch to the new buffer and continue
	 * polling there.
	 */
	if (unlikely(err == -EAGAIN && cq->resize_buf &&
		     cq->resize_buf->state == CQ_RESIZE_READY)) {
		/*
		 * In Tavor mode, the hardware keeps the producer
		 * index modulo the CQ size.  Since we might be making
		 * the CQ bigger, we need to mask our consumer index
		 * using the size of the old CQ buffer before looking
		 * in the new CQ buffer.
		 */
		if (!mthca_is_memfree(dev))
			cq->cons_index &= cq->ibcq.cqe;

		if (cqe_sw(get_cqe_from_buf(&cq->resize_buf->buf,
					    cq->cons_index & cq->resize_buf->cqe))) {
			struct mthca_cq_buf tbuf;
			int tcqe;

			tbuf         = cq->buf;
			tcqe         = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			cq->resize_buf->buf   = tbuf;
			cq->resize_buf->cqe   = tcqe;
			cq->resize_buf->state = CQ_RESIZE_SWAPPED;

			goto repoll;
		}
	}

	spin_unlock_irqrestore(&cq->lock, flags);
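	/* -EAGAIN from mthca_poll_one() just means the CQ is empty. */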

	return err == 0 || err == -EAGAIN ? npolled : err;
}

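/*
 * Arming a CQ requests a single completion event.  On Tavor one MMIO
 * doorbell write is enough; mem-free HCAs (Arbel) also keep an arm
 * doorbell record in host memory, which mthca_arbel_arm_cq() updates
 * before ringing the MMIO doorbell.
 */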
int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags)
{
	__be32 doorbell[2];

	doorbell[0] = cpu_to_be32(((flags & IB_CQ_SOLICITED_MASK) ==
				   IB_CQ_SOLICITED ?
				   MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
				   MTHCA_TAVOR_CQ_DB_REQ_NOT)      |
				  to_mcq(cq)->cqn);
	doorbell[1] = (__force __be32) 0xffffffff;

	mthca_write64(doorbell,
		      to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock));

	return 0;
}

int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mthca_cq *cq = to_mcq(ibcq);
	__be32 doorbell[2];
	u32 sn;
	__be32 ci;

	sn = cq->arm_sn & 3;
	ci = cpu_to_be32(cq->cons_index);

	doorbell[0] = ci;
	doorbell[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) |
				  ((flags & IB_CQ_SOLICITED_MASK) ==
				   IB_CQ_SOLICITED ? 1 : 2));

	mthca_write_db_rec(doorbell, cq->arm_db);

	/*
	 * Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO.
	 */
	wmb();

	doorbell[0] = cpu_to_be32((sn << 28)                       |
				  ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
				   MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL :
				   MTHCA_ARBEL_CQ_DB_REQ_NOT)      |
				  cq->cqn);
	doorbell[1] = ci;

	mthca_write64(doorbell,
		      to_mdev(ibcq->device)->kar + MTHCA_CQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->doorbell_lock));

	return 0;
}

int mthca_init_cq(struct mthca_dev *dev, int nent,
		  struct mthca_ucontext *ctx, u32 pdn,
		  struct mthca_cq *cq)
{
	struct mthca_mailbox *mailbox;
	struct mthca_cq_context *cq_context;
	int err = -ENOMEM;
	u8 status;

	cq->ibcq.cqe  = nent - 1;
	cq->is_kernel = !ctx;

	cq->cqn = mthca_alloc(&dev->cq_table.alloc);
	if (cq->cqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->cq_table.table, cq->cqn);
		if (err)
			goto err_out;

		if (cq->is_kernel) {
			cq->arm_sn = 1;

			err = -ENOMEM;

			cq->set_ci_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_SET_CI,
							     cq->cqn, &cq->set_ci_db);
			if (cq->set_ci_db_index < 0)
				goto err_out_icm;

			cq->arm_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM,
							  cq->cqn, &cq->arm_db);
			if (cq->arm_db_index < 0)
				goto err_out_ci;
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		goto err_out_arm;

	cq_context = mailbox->buf;

	if (cq->is_kernel) {
		err = mthca_alloc_cq_buf(dev, &cq->buf, nent);
		if (err)
			goto err_out_mailbox;
	}

	spin_lock_init(&cq->lock);
	cq->refcount = 1;
	init_waitqueue_head(&cq->wait);
	mutex_init(&cq->mutex);

	memset(cq_context, 0, sizeof *cq_context);
	cq_context->flags           = cpu_to_be32(MTHCA_CQ_STATUS_OK      |
						  MTHCA_CQ_STATE_DISARMED |
						  MTHCA_CQ_FLAG_TR);
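	/* nent must be a power of two; ffs(nent) - 1 is then log2(nent). */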
	cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
	if (ctx)
		cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index);
	else
		cq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
	cq_context->error_eqn       = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
	cq_context->comp_eqn        = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);
	cq_context->pd              = cpu_to_be32(pdn);
	cq_context->lkey            = cpu_to_be32(cq->buf.mr.ibmr.lkey);
	cq_context->cqn             = cpu_to_be32(cq->cqn);

	if (mthca_is_memfree(dev)) {
		cq_context->ci_db    = cpu_to_be32(cq->set_ci_db_index);
		cq_context->state_db = cpu_to_be32(cq->arm_db_index);
	}

	err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status);
	if (err) {
		mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err);
		goto err_out_free_mr;
	}

	if (status) {
		mthca_warn(dev, "SW2HW_CQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_mr;
	}

	spin_lock_irq(&dev->cq_table.lock);
	if (mthca_array_set(&dev->cq_table.cq,
			    cq->cqn & (dev->limits.num_cqs - 1),
			    cq)) {
		spin_unlock_irq(&dev->cq_table.lock);
		goto err_out_free_mr;
	}
	spin_unlock_irq(&dev->cq_table.lock);

	cq->cons_index = 0;

	mthca_free_mailbox(dev, mailbox);

	return 0;

err_out_free_mr:
	if (cq->is_kernel)
		mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_arm:
	if (cq->is_kernel && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);

err_out_ci:
	if (cq->is_kernel && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);

err_out_icm:
	mthca_table_put(dev, dev->cq_table.table, cq->cqn);

err_out:
	mthca_free(&dev->cq_table.alloc, cq->cqn);

	return err;
}

static inline int get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq)
{
	int c;

	spin_lock_irq(&dev->cq_table.lock);
	c = cq->refcount;
	spin_unlock_irq(&dev->cq_table.lock);

	return c;
}

void mthca_free_cq(struct mthca_dev *dev,
		   struct mthca_cq *cq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free CQ.\n");
		return;
	}

	err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status);

	if (0) {
		__be32 *ctx = mailbox->buf;
		int j;

		printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
		       cq->cqn, cq->cons_index,
		       cq->is_kernel ? !!next_cqe_sw(cq) : 0);
		for (j = 0; j < 16; ++j)
			printk(KERN_ERR "[%2x] %08x\n", j * 4, be32_to_cpu(ctx[j]));
	}

	spin_lock_irq(&dev->cq_table.lock);
	mthca_array_clear(&dev->cq_table.cq,
			  cq->cqn & (dev->limits.num_cqs - 1));
	--cq->refcount;
	spin_unlock_irq(&dev->cq_table.lock);

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X)
		synchronize_irq(dev->eq_table.eq[MTHCA_EQ_COMP].msi_x_vector);
	else
		synchronize_irq(dev->pdev->irq);

	wait_event(cq->wait, !get_cq_refcount(dev, cq));

	if (cq->is_kernel) {
		mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
		if (mthca_is_memfree(dev)) {
			mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM,    cq->arm_db_index);
			mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
		}
	}

	mthca_table_put(dev, dev->cq_table.table, cq->cqn);
	mthca_free(&dev->cq_table.alloc, cq->cqn);
	mthca_free_mailbox(dev, mailbox);
}

int mthca_init_cq_table(struct mthca_dev *dev)
{
	int err;

	spin_lock_init(&dev->cq_table.lock);

	err = mthca_alloc_init(&dev->cq_table.alloc,
			       dev->limits.num_cqs,
			       (1 << 24) - 1,
			       dev->limits.reserved_cqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->cq_table.cq,
			       dev->limits.num_cqs);
	if (err)
		mthca_alloc_cleanup(&dev->cq_table.alloc);

	return err;
}

void mthca_cleanup_cq_table(struct mthca_dev *dev)
{
	mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs);
	mthca_alloc_cleanup(&dev->cq_table.alloc);
}