/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_provider.h 1349 2004-12-16 21:09:43Z roland $
 */

#ifndef MTHCA_PROVIDER_H
#define MTHCA_PROVIDER_H

#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>

/*
 * Access-permission flag bits of an MPT (memory protection table)
 * entry.  Bit positions match the hardware MPT entry layout --
 * presumably the HCA firmware interface; verify against the PRM.
 */
#define MTHCA_MPT_FLAG_ATOMIC        (1 << 14)
#define MTHCA_MPT_FLAG_REMOTE_WRITE  (1 << 13)
#define MTHCA_MPT_FLAG_REMOTE_READ   (1 << 12)
#define MTHCA_MPT_FLAG_LOCAL_WRITE   (1 << 11)
#define MTHCA_MPT_FLAG_LOCAL_READ    (1 << 10)

/* One kernel buffer together with its saved PCI DMA unmap address. */
struct mthca_buf_list {
	void *buf;
	DECLARE_PCI_UNMAP_ADDR(mapping)
};

/*
 * Backing storage for a queue: either a single physically contiguous
 * ("direct") buffer, or an array of per-page buffers.  Which member is
 * valid is recorded by the owning structure's is_direct flag.
 */
union mthca_buf {
	struct mthca_buf_list direct;
	struct mthca_buf_list *page_list;
};

/* User access region (UAR): one doorbell page on the device. */
struct mthca_uar {
	unsigned long pfn;	/* page frame number of the UAR page */
	int           index;	/* index of this UAR on the device */
};

struct mthca_user_db_table;

/* Per-userspace-process device context, wrapping the IB core ucontext. */
struct mthca_ucontext {
	struct ib_ucontext          ibucontext;
	struct mthca_uar            uar;	/* UAR assigned to this context */
	struct mthca_user_db_table *db_tab;	/* userspace doorbell records */
};

/* Opaque handle for an MTT (memory translation table) allocation. */
struct mthca_mtt;

/* Memory region: IB core MR plus this driver's translation entries. */
struct mthca_mr {
	struct ib_mr      ibmr;
	struct ib_umem   *umem;	/* pinned userspace pages backing the MR -- presumably NULL for kernel MRs; verify */
	struct mthca_mtt *mtt;
};

/* Fast memory region (FMR): a remappable MR with cached MPT/MTT pointers. */
struct mthca_fmr {
	struct ib_fmr      ibmr;
	struct ib_fmr_attr attr;
	struct mthca_mtt  *mtt;
	int                maps;	/* count of current mappings -- presumably bounded by attr.max_maps; verify */
	union {
		/* Tavor: MPT entry and MTTs are written through MMIO. */
		struct {
			struct mthca_mpt_entry __iomem *mpt;
			u64 __iomem *mtts;
		} tavor;
		/* Arbel (memfree): MPT entry and MTTs live in host memory. */
		struct {
			struct mthca_mpt_entry *mpt;
			__be64 *mtts;
			dma_addr_t dma_handle;	/* DMA address of the MTT array */
		} arbel;
	} mem;
};

/* Protection domain. */
struct mthca_pd {
	struct ib_pd    ibpd;
	u32             pd_num;
	atomic_t        sqp_count;	/* NOTE(review): looks like a count of special QPs in this PD -- confirm */
	struct mthca_mr ntmr;
	int             privileged;
};

/* Event queue. */
struct mthca_eq {
	struct mthca_dev      *dev;
	int                    eqn;		/* EQ number */
	u32                    eqn_mask;
	u32                    cons_index;	/* consumer index */
	u16                    msi_x_vector;
	u16                    msi_x_entry;
	int                    have_irq;	/* nonzero once an IRQ has been requested -- TODO confirm */
	int                    nent;		/* number of EQ entries */
	struct mthca_buf_list *page_list;	/* pages backing the EQ */
	struct mthca_mr        mr;
};

struct mthca_av;

/* Where the address vector (av) backing an AH was allocated. */
enum mthca_ah_type {
	MTHCA_AH_ON_HCA,	/* in on-HCA memory */
	MTHCA_AH_PCI_POOL,	/* from a PCI DMA pool */
	MTHCA_AH_KMALLOC	/* from kmalloc */
};

/* Address handle. */
struct mthca_ah {
	struct ib_ah       ibah;
	enum mthca_ah_type type;	/* how/where av was allocated */
	u32                key;
	struct mthca_av   *av;
	dma_addr_t         avdma;	/* DMA address of av */
};

/*
 * Quick description of our CQ/QP locking scheme:
 *
 * We have one global lock that protects dev->cq/qp_table.  Each
 * struct mthca_cq/qp also has its own lock.  An individual qp lock
 * may be taken inside of an individual cq lock.  Both cqs attached to
 * a qp may be locked, with the cq with the lower cqn locked first.
 * No other nesting should be done.
 *
 * Each struct mthca_cq/qp also has a ref count, protected by the
 * corresponding table lock.  The pointer from the cq/qp_table to the
 * struct counts as one reference.  This reference also is good for
 * access through the consumer API, so modifying the CQ/QP etc doesn't
 * need to take another reference.  Access to a QP because of a
 * completion being polled does not need a reference either.
 *
 * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
 * destroy function to sleep on.
 *
 * This means that access from the consumer API requires nothing but
 * taking the struct's lock.
 *
 * Access because of a completion event should go as follows:
 * - lock cq/qp_table and look up struct
 * - increment ref count in struct
 * - drop cq/qp_table lock
 * - lock struct, do your thing, and unlock struct
 * - decrement ref count; if zero, wake up waiters
 *
 * To destroy a CQ/QP, we can do the following:
 * - lock cq/qp_table
 * - remove pointer and decrement ref count
 * - unlock cq/qp_table lock
 * - wait_event until ref count is zero
 *
 * It is the consumer's responsibility to make sure that no QP
 * operations (WQE posting or state modification) are pending when a
 * QP is destroyed.  Also, the consumer must make sure that calls to
 * qp_modify are serialized.  Similarly, the consumer is responsible
 * for ensuring that no CQ resize operations are pending when a CQ
 * is destroyed.
 *
 * Possible optimizations (wait for profile data to see if/where we
 * have locks bouncing between CPUs):
 * - split cq/qp table lock into n separate (cache-aligned) locks,
 *   indexed (say) by the page in the table
 * - split QP struct lock into three (one for common info, one for the
 *   send queue and one for the receive queue)
 */

/* CQE buffer of a CQ, together with its MR registration. */
struct mthca_cq_buf {
	union mthca_buf		queue;
	struct mthca_mr		mr;
	int			is_direct;	/* selects queue.direct vs queue.page_list */
};

/* State carried across a CQ resize operation. */
struct mthca_cq_resize {
	struct mthca_cq_buf	buf;	/* replacement CQE buffer */
	int			cqe;	/* size of the new buffer, in CQEs -- TODO confirm off-by-one convention */
	/* progress of the resize; exact transitions live in the CQ code */
	enum {
		CQ_RESIZE_ALLOC,
		CQ_RESIZE_READY,
		CQ_RESIZE_SWAPPED
	}			state;
};

/* Completion queue.  See the locking-scheme comment earlier in this file. */
struct mthca_cq {
	struct ib_cq		ibcq;
	spinlock_t		lock;
	int			refcount;	/* protected by dev->cq_table lock */
	int			cqn;
	u32			cons_index;	/* consumer index */
	struct mthca_cq_buf	buf;
	struct mthca_cq_resize *resize_buf;	/* presumably non-NULL only while a resize is in flight; verify */
	int			is_kernel;

	/* Next fields are Arbel only */
	int			set_ci_db_index;
	__be32		       *set_ci_db;
	int			arm_db_index;
	__be32		       *arm_db;
	int			arm_sn;

	wait_queue_head_t	wait;	/* destroy sleeps here until refcount reaches zero */
	struct mutex		mutex;
};

/* Shared receive queue. */
struct mthca_srq {
	struct ib_srq		ibsrq;
	spinlock_t		lock;
	int			refcount;
	int			srqn;
	int			max;		/* number of WQEs */
	int			max_gs;		/* NOTE(review): presumably max scatter entries per WQE -- confirm */
	int			wqe_shift;	/* log2 of the WQE stride */
	int			first_free;
	int			last_free;
	u16			counter;  /* Arbel only */
	int			db_index; /* Arbel only */
	__be32		       *db;       /* Arbel only */
	void		       *last;

	int			is_direct;	/* selects queue.direct vs queue.page_list */
	u64		       *wrid;		/* work request IDs, one per WQE */
	union mthca_buf		queue;
	struct mthca_mr		mr;

	wait_queue_head_t	wait;	/* destroy sleeps here until refcount reaches zero */
	struct mutex		mutex;
};

/* One work queue (send or receive) of a QP. */
struct mthca_wq {
	spinlock_t lock;
	int        max;		/* number of WQEs */
	unsigned   next_ind;
	unsigned   last_comp;
	unsigned   head;
	unsigned   tail;
	void      *last;	/* last WQE written -- presumably for chaining; verify */
	int        max_gs;
	int        wqe_shift;	/* log2 of the WQE stride */

	int        db_index;	/* Arbel only */
	__be32    *db;
};

/* Queue pair.  See the locking-scheme comment earlier in this file. */
struct mthca_qp {
	struct ib_qp           ibqp;
	int                    refcount;	/* protected by dev->qp_table lock */
	u32                    qpn;
	int                    is_direct;	/* selects queue.direct vs queue.page_list */
	u8                     port; /* for SQP and memfree use only */
	u8                     alt_port; /* for memfree use only */
	u8                     transport;
	u8                     state;
	u8                     atomic_rd_en;
	u8                     resp_depth;

	struct mthca_mr        mr;

	struct mthca_wq        rq;	/* receive queue */
	struct mthca_wq        sq;	/* send queue */
	enum ib_sig_type       sq_policy;
	int                    send_wqe_offset;
	int                    max_inline_data;

	u64                   *wrid;	/* work request IDs for both queues */
	union mthca_buf	       queue;

	wait_queue_head_t      wait;	/* destroy sleeps here until refcount reaches zero */
	struct mutex	       mutex;
};

/* Special QP (e.g. QP0/QP1): a QP plus a buffer for building UD headers. */
struct mthca_sqp {
	struct mthca_qp qp;
	int             pkey_index;
	u32             qkey;
	u32             send_psn;
	struct ib_ud_header ud_header;
	int             header_buf_size;
	void           *header_buf;	/* staging buffer for headers; header_dma is its DMA address */
	dma_addr_t      header_dma;
};

299 300 301 302 303
static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mthca_ucontext, ibucontext);
}

304 305 306 307 308
static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr)
{
	return container_of(ibmr, struct mthca_fmr, ibmr);
}

L
Linus Torvalds 已提交
309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328
static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mthca_mr, ibmr);
}

/* Map an IB core PD back to the enclosing mthca_pd. */
static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd)
{
	struct mthca_pd *pd;

	pd = container_of(ibpd, struct mthca_pd, ibpd);
	return pd;
}

/* Map an IB core AH back to the enclosing mthca_ah. */
static inline struct mthca_ah *to_mah(struct ib_ah *ibah)
{
	struct mthca_ah *ah;

	ah = container_of(ibah, struct mthca_ah, ibah);
	return ah;
}

/* Map an IB core CQ back to the enclosing mthca_cq. */
static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)
{
	struct mthca_cq *cq;

	cq = container_of(ibcq, struct mthca_cq, ibcq);
	return cq;
}

329 330 331 332 333
static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mthca_srq, ibsrq);
}

L
Linus Torvalds 已提交
334 335 336 337 338 339 340 341 342 343 344
static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mthca_qp, ibqp);
}

/* Map a mthca_qp back to the enclosing mthca_sqp (valid for special QPs only). */
static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp)
{
	struct mthca_sqp *sqp;

	sqp = container_of(qp, struct mthca_sqp, qp);
	return sqp;
}

#endif /* MTHCA_PROVIDER_H */