/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _HNS_ROCE_DEVICE_H
#define _HNS_ROCE_DEVICE_H

#include <rdma/ib_verbs.h>
#include <rdma/hns-abi.h>

#define DRV_NAME "hns_roce"

#define PCI_REVISION_ID_HIP08			0x21
#define PCI_REVISION_ID_HIP09			0x30

#define HNS_ROCE_HW_VER1	('h' << 24 | 'i' << 16 | '0' << 8 | '6')

#define HNS_ROCE_MAX_MSG_LEN			0x80000000

#define HNS_ROCE_IB_MIN_SQ_STRIDE		6

#define HNS_ROCE_BA_SIZE			(32 * 4096)

#define BA_BYTE_LEN				8

/* Hardware specification only for v1 engine */
#define HNS_ROCE_MIN_CQE_NUM			0x40
#define HNS_ROCE_MIN_WQE_NUM			0x20
#define HNS_ROCE_MIN_SRQ_WQE_NUM		1

/* Hardware specification only for v1 engine */
#define HNS_ROCE_MAX_INNER_MTPT_NUM		0x7
#define HNS_ROCE_MAX_MTPT_PBL_NUM		0x100000

#define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS	20
#define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT	\
	(5000 / HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS)
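/* i.e. up to 250 polls, a total wait budget of about 5 seconds */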
#define HNS_ROCE_CQE_WCMD_EMPTY_BIT		0x2
#define HNS_ROCE_MIN_CQE_CNT			16

#define HNS_ROCE_RESERVED_SGE			1

#define HNS_ROCE_MAX_IRQ_NUM			128

#define HNS_ROCE_SGE_IN_WQE			2
#define HNS_ROCE_SGE_SHIFT			4

#define EQ_ENABLE				1
#define EQ_DISABLE				0

#define HNS_ROCE_CEQ				0
#define HNS_ROCE_AEQ				1

#define HNS_ROCE_CEQE_SIZE 0x4
#define HNS_ROCE_AEQE_SIZE 0x10

#define HNS_ROCE_V3_EQE_SIZE 0x40

#define HNS_ROCE_V2_CQE_SIZE 32
#define HNS_ROCE_V3_CQE_SIZE 64

#define HNS_ROCE_V2_QPC_SZ 256
#define HNS_ROCE_V3_QPC_SZ 512

#define HNS_ROCE_MAX_PORTS			6
#define HNS_ROCE_GID_SIZE			16
#define HNS_ROCE_SGE_SIZE			16
#define HNS_ROCE_DWQE_SIZE			65536

#define HNS_ROCE_HOP_NUM_0			0xff
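/* A hop num of 0xff is a magic value meaning 0-hop (direct) addressing;
 * to_hr_hem_hopnum() below maps it to an effective hop count of 0.
 */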

#define BITMAP_NO_RR				0
#define BITMAP_RR				1

#define MR_TYPE_MR				0x00
#define MR_TYPE_FRMR				0x01
#define MR_TYPE_DMA				0x03

#define HNS_ROCE_FRMR_MAX_PA			512

#define PKEY_ID					0xffff
#define GUID_LEN				8
#define NODE_DESC_SIZE				64
#define DB_REG_OFFSET				0x1000

/* Page shift offset configured to HW when PAGE_SIZE is larger than 4KB */
#define PG_SHIFT_OFFSET				(PAGE_SHIFT - 12)
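/* e.g. 0 with 4K kernel pages (PAGE_SHIFT == 12), 4 with 64K pages */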

#define PAGES_SHIFT_8				8
#define PAGES_SHIFT_16				16
#define PAGES_SHIFT_24				24
#define PAGES_SHIFT_32				32

#define HNS_ROCE_IDX_QUE_ENTRY_SZ		4
#define SRQ_DB_REG				0x230

#define HNS_ROCE_QP_BANK_NUM 8
#define HNS_ROCE_CQ_BANK_NUM 4

#define CQ_BANKID_SHIFT 2
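/* CQs are spread across HNS_ROCE_CQ_BANK_NUM banks; the low
 * CQ_BANKID_SHIFT bits of a CQN are presumably the bank index, so
 * consecutive CQNs land in different banks.
 */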
/* The chip calculates the consumer index as if the EQ were twice
 * its actual depth
 */
#define EQ_DEPTH_COEFF				2
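/* e.g. an EQ with 256 entries counts its consumer index modulo 512 */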

enum {
	SERV_TYPE_RC,
	SERV_TYPE_UC,
	SERV_TYPE_RD,
	SERV_TYPE_UD,
	SERV_TYPE_XRC = 5,
};

enum hns_roce_qp_state {
	HNS_ROCE_QP_STATE_RST,
	HNS_ROCE_QP_STATE_INIT,
	HNS_ROCE_QP_STATE_RTR,
	HNS_ROCE_QP_STATE_RTS,
	HNS_ROCE_QP_STATE_SQD,
	HNS_ROCE_QP_STATE_ERR,
	HNS_ROCE_QP_NUM_STATE,
};

enum hns_roce_event {
	HNS_ROCE_EVENT_TYPE_PATH_MIG                  = 0x01,
	HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED           = 0x02,
	HNS_ROCE_EVENT_TYPE_COMM_EST                  = 0x03,
	HNS_ROCE_EVENT_TYPE_SQ_DRAINED                = 0x04,
	HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR            = 0x05,
	HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR    = 0x06,
	HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR     = 0x07,
	HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH           = 0x08,
	HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH        = 0x09,
	HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR           = 0x0a,
	HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR           = 0x0b,
	HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW               = 0x0c,
	HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID             = 0x0d,
	HNS_ROCE_EVENT_TYPE_PORT_CHANGE               = 0x0f,
	/* 0x10 and 0x11 are unused in the current application cases */
	HNS_ROCE_EVENT_TYPE_DB_OVERFLOW               = 0x12,
	HNS_ROCE_EVENT_TYPE_MB                        = 0x13,
	HNS_ROCE_EVENT_TYPE_FLR			      = 0x15,
	HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION	      = 0x16,
	HNS_ROCE_EVENT_TYPE_INVALID_XRCETH	      = 0x17,
};

#define HNS_ROCE_CAP_FLAGS_EX_SHIFT 12
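/* Flags at bit 12 and above (e.g. SDI_MODE, STASH) come from an extended
 * capability field that is shifted up by this amount before being merged
 * into the flags below.
 */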

enum {
	HNS_ROCE_CAP_FLAG_REREG_MR		= BIT(0),
	HNS_ROCE_CAP_FLAG_ROCE_V1_V2		= BIT(1),
	HNS_ROCE_CAP_FLAG_RQ_INLINE		= BIT(2),
	HNS_ROCE_CAP_FLAG_RECORD_DB		= BIT(3),
	HNS_ROCE_CAP_FLAG_SQ_RECORD_DB		= BIT(4),
	HNS_ROCE_CAP_FLAG_SRQ			= BIT(5),
	HNS_ROCE_CAP_FLAG_XRC			= BIT(6),
	HNS_ROCE_CAP_FLAG_MW			= BIT(7),
	HNS_ROCE_CAP_FLAG_FRMR                  = BIT(8),
	HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL		= BIT(9),
	HNS_ROCE_CAP_FLAG_ATOMIC		= BIT(10),
	HNS_ROCE_CAP_FLAG_SDI_MODE		= BIT(14),
	HNS_ROCE_CAP_FLAG_STASH			= BIT(17),
};

#define HNS_ROCE_DB_TYPE_COUNT			2
#define HNS_ROCE_DB_UNIT_SIZE			4

enum {
	HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
};
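/* Each doorbell record occupies HNS_ROCE_DB_UNIT_SIZE (4) bytes, so one
 * kernel page holds PAGE_SIZE / 4 records, e.g. 1024 with 4K pages.
 */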

enum hns_roce_reset_stage {
	HNS_ROCE_STATE_NON_RST,
	HNS_ROCE_STATE_RST_BEF_DOWN,
	HNS_ROCE_STATE_RST_DOWN,
	HNS_ROCE_STATE_RST_UNINIT,
	HNS_ROCE_STATE_RST_INIT,
	HNS_ROCE_STATE_RST_INITED,
};

enum hns_roce_instance_state {
	HNS_ROCE_STATE_NON_INIT,
	HNS_ROCE_STATE_INIT,
	HNS_ROCE_STATE_INITED,
	HNS_ROCE_STATE_UNINIT,
};

enum {
	HNS_ROCE_RST_DIRECT_RETURN		= 0,
};

enum {
	CMD_RST_PRC_OTHERS,
	CMD_RST_PRC_SUCCESS,
	CMD_RST_PRC_EBUSY,
};

#define HNS_ROCE_CMD_SUCCESS			1

/* The minimum page size is 4K for hardware */
#define HNS_HW_PAGE_SHIFT			12
#define HNS_HW_PAGE_SIZE			(1 << HNS_HW_PAGE_SHIFT)

struct hns_roce_uar {
	u64		pfn;
	unsigned long	index;
	unsigned long	logic_idx;
};

struct hns_roce_ucontext {
	struct ib_ucontext	ibucontext;
	struct hns_roce_uar	uar;
	struct list_head	page_list;
	struct mutex		page_mutex;
};

struct hns_roce_pd {
	struct ib_pd		ibpd;
	unsigned long		pdn;
};

struct hns_roce_xrcd {
	struct ib_xrcd ibxrcd;
	u32 xrcdn;
};

struct hns_roce_bitmap {
	/* Position of the last bit set, used as the start hint for the
	 * next traversal
	 */
	unsigned long		last;
	unsigned long		top;
	unsigned long		max;
	unsigned long		reserved_top;
	unsigned long		mask;
	spinlock_t		lock;
	unsigned long		*table;
};

/* For Hardware Entry Memory */
struct hns_roce_hem_table {
	/* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */
	u32		type;
	/* Number of elements in the HEM array */
	unsigned long	num_hem;
	/* Total number of objects recorded by the HEM entries */
	unsigned long	num_obj;
	/* Single obj size */
	unsigned long	obj_size;
	unsigned long	table_chunk_size;
	int		lowmem;
	struct mutex	mutex;
	struct hns_roce_hem **hem;
	u64		**bt_l1;
	dma_addr_t	*bt_l1_dma_addr;
	u64		**bt_l0;
	dma_addr_t	*bt_l0_dma_addr;
};

struct hns_roce_buf_region {
	u32 offset; /* page offset */
	u32 count; /* page count */
	int hopnum; /* addressing hop num */
};

#define HNS_ROCE_MAX_BT_REGION	3
#define HNS_ROCE_MAX_BT_LEVEL	3
struct hns_roce_hem_list {
	struct list_head root_bt;
	/* link all bt dma mem by hop config */
	struct list_head mid_bt[HNS_ROCE_MAX_BT_REGION][HNS_ROCE_MAX_BT_LEVEL];
	struct list_head btm_bt; /* link all bottom bt in @mid_bt */
	dma_addr_t root_ba; /* pointer to the root ba table */
};

struct hns_roce_buf_attr {
	struct {
		size_t	size;  /* region size */
		int	hopnum; /* multi-hop addressing hop num */
	} region[HNS_ROCE_MAX_BT_REGION];
	unsigned int region_count; /* valid region count */
	unsigned int page_shift;  /* buffer page shift */
	unsigned int user_access; /* umem access flag */
	bool mtt_only; /* only alloc buffer-required MTT memory */
};

struct hns_roce_hem_cfg {
	dma_addr_t	root_ba; /* root BA table's address */
	bool		is_direct; /* addressing without BA table */
	unsigned int	ba_pg_shift; /* BA table page shift */
	unsigned int	buf_pg_shift; /* buffer page shift */
	unsigned int	buf_pg_count;  /* buffer page count */
	struct hns_roce_buf_region region[HNS_ROCE_MAX_BT_REGION];
	unsigned int	region_count;
};

/* memory translation region */
struct hns_roce_mtr {
	struct hns_roce_hem_list hem_list; /* multi-hop addressing resource */
	struct ib_umem		*umem; /* user space buffer */
	struct hns_roce_buf	*kmem; /* kernel space buffer */
	struct hns_roce_hem_cfg  hem_cfg; /* config for hardware addressing */
};

struct hns_roce_mw {
	struct ib_mw		ibmw;
	u32			pdn;
	u32			rkey;
	int			enabled; /* MW's active status */
	u32			pbl_hop_num;
	u32			pbl_ba_pg_sz;
	u32			pbl_buf_pg_sz;
};

/* Only a 4K page size is supported for MR registration */
#define MR_SIZE_4K 0

struct hns_roce_mr {
	struct ib_mr		ibmr;
	u64			iova; /* MR's virtual original addr */
	u64			size; /* Address range of MR */
	u32			key; /* Key of MR */
	u32			pd;   /* PD num of MR */
	u32			access;	/* Access permission of MR */
	int			enabled; /* MR's active status */
	int			type;	/* MR's register type */
	u32			pbl_hop_num;	/* multi-hop number */
	struct hns_roce_mtr	pbl_mtr;
	u32			npages;
	dma_addr_t		*page_list;
};

struct hns_roce_mr_table {
	struct hns_roce_bitmap		mtpt_bitmap;
	struct hns_roce_hem_table	mtpt_table;
};

struct hns_roce_wq {
	u64		*wrid;     /* Work request ID */
	spinlock_t	lock;
	u32		wqe_cnt;  /* WQE num */
	u32		max_gs;
	u32		rsv_sge;
	int		offset;
	int		wqe_shift;	/* WQE size */
	u32		head;
	u32		tail;
	void __iomem	*db_reg_l;
};

struct hns_roce_sge {
	unsigned int	sge_cnt;	/* SGE num */
	int		offset;
	int		sge_shift;	/* SGE size */
};

struct hns_roce_buf_list {
	void		*buf;
	dma_addr_t	map;
};

/*
 * %HNS_ROCE_BUF_DIRECT indicates that all memory must be in a contiguous
 * DMA address range.
 *
 * %HNS_ROCE_BUF_NOSLEEP indicates that the caller cannot sleep.
 *
 * %HNS_ROCE_BUF_NOFAIL indicates that the allocation fails only when the
 * allocated size is zero, even if the allocated size is smaller than the
 * required size.
 */
enum {
	HNS_ROCE_BUF_DIRECT = BIT(0),
	HNS_ROCE_BUF_NOSLEEP = BIT(1),
	HNS_ROCE_BUF_NOFAIL = BIT(2),
};

struct hns_roce_buf {
	struct hns_roce_buf_list	*trunk_list;
	u32				ntrunks;
	u32				npages;
	unsigned int			trunk_shift;
	unsigned int			page_shift;
};

struct hns_roce_db_pgdir {
	struct list_head	list;
	DECLARE_BITMAP(order0, HNS_ROCE_DB_PER_PAGE);
	DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
	unsigned long		*bits[HNS_ROCE_DB_TYPE_COUNT];
	u32			*page;
	dma_addr_t		db_dma;
};

struct hns_roce_user_db_page {
	struct list_head	list;
	struct ib_umem		*umem;
	unsigned long		user_virt;
	refcount_t		refcount;
};

struct hns_roce_db {
	u32		*db_record;
	union {
		struct hns_roce_db_pgdir *pgdir;
		struct hns_roce_user_db_page *user_page;
	} u;
	dma_addr_t	dma;
	void		*virt_addr;
	unsigned long	index;
	unsigned long	order;
};

struct hns_roce_cq {
	struct ib_cq			ib_cq;
	struct hns_roce_mtr		mtr;
	struct hns_roce_db		db;
	u32				flags;
	spinlock_t			lock;
	u32				cq_depth;
	u32				cons_index;
	u32				*set_ci_db;
	void __iomem			*cq_db_l;
	u16				*tptr_addr;
	int				arm_sn;
	int				cqe_size;
	unsigned long			cqn;
	u32				vector;
	atomic_t			refcount;
	struct completion		free;
	struct list_head		sq_list; /* all qps on this send cq */
	struct list_head		rq_list; /* all qps on this recv cq */
	int				is_armed; /* cq is armed */
	struct list_head		node; /* all armed cqs are on a list */
};

struct hns_roce_idx_que {
	struct hns_roce_mtr		mtr;
	int				entry_shift;
	unsigned long			*bitmap;
	u32				head;
	u32				tail;
};

struct hns_roce_srq {
	struct ib_srq		ibsrq;
	unsigned long		srqn;
	u32			wqe_cnt;
	int			max_gs;
	u32			rsv_sge;
	int			wqe_shift;
	u32			cqn;
	u32			xrcdn;
	void __iomem		*db_reg_l;

	atomic_t		refcount;
	struct completion	free;

	struct hns_roce_mtr	buf_mtr;

	u64		       *wrid;
	struct hns_roce_idx_que idx_que;
	spinlock_t		lock;
	struct mutex		mutex;
	void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
};

struct hns_roce_uar_table {
	struct hns_roce_bitmap bitmap;
};

struct hns_roce_bank {
	struct ida ida;
	u32 inuse; /* Number of IDs allocated */
	u32 min; /* Lowest ID to allocate.  */
	u32 max; /* Highest ID to allocate. */
	u32 next; /* Next ID to allocate. */
};

struct hns_roce_qp_table {
	struct hns_roce_hem_table	qp_table;
	struct hns_roce_hem_table	irrl_table;
	struct hns_roce_hem_table	trrl_table;
	struct hns_roce_hem_table	sccc_table;
	struct mutex			scc_mutex;
	struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
	struct mutex bank_mutex;
};

struct hns_roce_cq_table {
	struct xarray			array;
	struct hns_roce_hem_table	table;
	struct hns_roce_bank bank[HNS_ROCE_CQ_BANK_NUM];
	struct mutex			bank_mutex;
};

struct hns_roce_srq_table {
	struct hns_roce_bitmap		bitmap;
	struct xarray			xa;
	struct hns_roce_hem_table	table;
};

struct hns_roce_raq_table {
	struct hns_roce_buf_list	*e_raq_buf;
};

struct hns_roce_av {
	u8 port;
	u8 gid_index;
	u8 stat_rate;
	u8 hop_limit;
	u32 flowlabel;
	u16 udp_sport;
	u8 sl;
	u8 tclass;
	u8 dgid[HNS_ROCE_GID_SIZE];
	u8 mac[ETH_ALEN];
	u16 vlan_id;
	u8 vlan_en;
};

struct hns_roce_ah {
	struct ib_ah		ibah;
	struct hns_roce_av	av;
};

struct hns_roce_cmd_context {
	struct completion	done;
	int			result;
	int			next;
	u64			out_param;
	u16			token;
};

struct hns_roce_cmdq {
	struct dma_pool		*pool;
	struct mutex		hcr_mutex;
	struct semaphore	poll_sem;
	/*
	 * Event mode: the command register is protected by a mutex so that
	 * max_cmds is never exceeded and users stay within their allotted
	 * region.
	 */
	struct semaphore	event_sem;
	int			max_cmds;
	spinlock_t		context_lock;
	int			free_head;
	struct hns_roce_cmd_context *context;
	/*
	 * max_cmds is rounded to a power of 2, and this mask extracts a
	 * command context's index from its token.
	 */
	u16			token_mask;
	/*
	 * Whether commands are processed in event mode. Once the command
	 * event queue is ready, the driver can switch into event mode;
	 * when the device is closed it switches back to poll (non-event)
	 * mode.
	 */
	u8			use_events;
};

struct hns_roce_cmd_mailbox {
	void		       *buf;
	dma_addr_t		dma;
};

struct hns_roce_dev;

struct hns_roce_rinl_sge {
	void			*addr;
	u32			len;
};

struct hns_roce_rinl_wqe {
	struct hns_roce_rinl_sge *sg_list;
	u32			 sge_cnt;
};

struct hns_roce_rinl_buf {
	struct hns_roce_rinl_wqe *wqe_list;
	u32			 wqe_cnt;
};

enum {
	HNS_ROCE_FLUSH_FLAG = 0,
};

struct hns_roce_work {
	struct hns_roce_dev *hr_dev;
	struct work_struct work;
	int event_type;
	int sub_type;
	u32 queue_num;
};

enum {
	HNS_ROCE_QP_CAP_DIRECT_WQE = BIT(5),
};

struct hns_roce_qp {
	struct ib_qp		ibqp;
	struct hns_roce_wq	rq;
	struct hns_roce_db	rdb;
	struct hns_roce_db	sdb;
	unsigned long		en_flags;
	u32			doorbell_qpn;
	enum ib_sig_type	sq_signal_bits;
	struct hns_roce_wq	sq;

	struct hns_roce_mtr	mtr;

	u32			buff_size;
	struct mutex		mutex;
	u8			port;
	u8			phy_port;
	u8			sl;
	u8			resp_depth;
	u8			state;
	u32			access_flags;
	u32                     atomic_rd_en;
	u32			pkey_index;
	u32			qkey;
	void			(*event)(struct hns_roce_qp *qp,
					 enum hns_roce_event event_type);
	unsigned long		qpn;

	u32			xrcdn;

	atomic_t		refcount;
	struct completion	free;

	struct hns_roce_sge	sge;
	u32			next_sge;
	enum ib_mtu		path_mtu;
	u32			max_inline_data;
	/* 0: flush needed, 1: unneeded */
	unsigned long		flush_flag;
	struct hns_roce_work	flush_work;
	struct hns_roce_rinl_buf rq_inl_buf;
	struct list_head	node;		/* all qps are on a list */
	struct list_head	rq_node;	/* all recv qps are on a list */
	struct list_head	sq_node;	/* all send qps are on a list */
};

struct hns_roce_ib_iboe {
	spinlock_t		lock;
	struct net_device      *netdevs[HNS_ROCE_MAX_PORTS];
	struct notifier_block	nb;
	u8			phy_port[HNS_ROCE_MAX_PORTS];
};

enum {
	HNS_ROCE_EQ_STAT_INVALID  = 0,
	HNS_ROCE_EQ_STAT_VALID    = 2,
};

struct hns_roce_ceqe {
	__le32	comp;
	__le32	rsv[15];
};

struct hns_roce_aeqe {
	__le32 asyn;
	union {
		struct {
			__le32 num;
			u32 rsv0;
			u32 rsv1;
		} queue_event;

		struct {
			__le64  out_param;
			__le16  token;
			u8	status;
			u8	rsv0;
		} __packed cmd;
	 } event;
	__le32 rsv[12];
};

struct hns_roce_eq {
	struct hns_roce_dev		*hr_dev;
	void __iomem			*doorbell;

	int				type_flag; /* Aeq:1 ceq:0 */
	int				eqn;
	u32				entries;
	u32				log_entries;
	int				eqe_size;
	int				irq;
	int				log_page_size;
	u32				cons_index;
	struct hns_roce_buf_list	*buf_list;
	int				over_ignore;
	int				coalesce;
	int				arm_st;
	int				hop_num;
	struct hns_roce_mtr		mtr;
	u16				eq_max_cnt;
	u32				eq_period;
	int				shift;
	int				event_type;
	int				sub_type;
};

struct hns_roce_eq_table {
	struct hns_roce_eq	*eq;
	void __iomem		**eqc_base; /* only for hw v1 */
};

struct hns_roce_caps {
	u64		fw_ver;
	u8		num_ports;
	int		gid_table_len[HNS_ROCE_MAX_PORTS];
	int		pkey_table_len[HNS_ROCE_MAX_PORTS];
	int		local_ca_ack_delay;
	int		num_uars;
	u32		phy_num_uars;
	u32		max_sq_sg;
	u32		max_sq_inline;
	u32		max_rq_sg;
	u32		max_extend_sg;
	u32		num_qps;
	u32		reserved_qps;
	int		num_qpc_timer;
	int		num_cqc_timer;
	int		num_srqs;
	u32		max_wqes;
	u32		max_srq_wrs;
	u32		max_srq_sges;
	u32		max_sq_desc_sz;
	u32		max_rq_desc_sz;
	u32		max_srq_desc_sz;
	int		max_qp_init_rdma;
	int		max_qp_dest_rdma;
	u32		num_cqs;
	u32		max_cqes;
	u32		min_cqes;
	u32		min_wqes;
	u32		reserved_cqs;
	int		reserved_srqs;
	int		num_aeq_vectors;
	int		num_comp_vectors;
	int		num_other_vectors;
	u32		num_mtpts;
	u32		num_mtt_segs;
	u32		num_cqe_segs;
	u32		num_srqwqe_segs;
	u32		num_idx_segs;
	int		reserved_mrws;
	int		reserved_uars;
	int		num_pds;
	int		reserved_pds;
	u32		num_xrcds;
	u32		reserved_xrcds;
	u32		mtt_entry_sz;
	u32		cqe_sz;
	u32		page_size_cap;
	u32		reserved_lkey;
	int		mtpt_entry_sz;
	int		qpc_sz;
	int		irrl_entry_sz;
	int		trrl_entry_sz;
	int		cqc_entry_sz;
	int		sccc_sz;
	int		qpc_timer_entry_sz;
	int		cqc_timer_entry_sz;
	int		srqc_entry_sz;
	int		idx_entry_sz;
	u32		pbl_ba_pg_sz;
	u32		pbl_buf_pg_sz;
	u32		pbl_hop_num;
	int		aeqe_depth;
	int		ceqe_depth;
	u32		aeqe_size;
	u32		ceqe_size;
	enum ib_mtu	max_mtu;
	u32		qpc_bt_num;
	u32		qpc_timer_bt_num;
	u32		srqc_bt_num;
	u32		cqc_bt_num;
	u32		cqc_timer_bt_num;
	u32		mpt_bt_num;
	u32		sccc_bt_num;
	u32		gmv_bt_num;
	u32		qpc_ba_pg_sz;
	u32		qpc_buf_pg_sz;
	u32		qpc_hop_num;
	u32		srqc_ba_pg_sz;
	u32		srqc_buf_pg_sz;
	u32		srqc_hop_num;
	u32		cqc_ba_pg_sz;
	u32		cqc_buf_pg_sz;
	u32		cqc_hop_num;
	u32		mpt_ba_pg_sz;
	u32		mpt_buf_pg_sz;
	u32		mpt_hop_num;
	u32		mtt_ba_pg_sz;
	u32		mtt_buf_pg_sz;
	u32		mtt_hop_num;
	u32		wqe_sq_hop_num;
	u32		wqe_sge_hop_num;
	u32		wqe_rq_hop_num;
	u32		sccc_ba_pg_sz;
	u32		sccc_buf_pg_sz;
	u32		sccc_hop_num;
	u32		qpc_timer_ba_pg_sz;
	u32		qpc_timer_buf_pg_sz;
	u32		qpc_timer_hop_num;
	u32		cqc_timer_ba_pg_sz;
	u32		cqc_timer_buf_pg_sz;
	u32		cqc_timer_hop_num;
	u32             cqe_ba_pg_sz;	/* page_size = 4K*(2^cqe_ba_pg_sz) */
	u32		cqe_buf_pg_sz;
	u32		cqe_hop_num;
	u32		srqwqe_ba_pg_sz;
	u32		srqwqe_buf_pg_sz;
	u32		srqwqe_hop_num;
	u32		idx_ba_pg_sz;
	u32		idx_buf_pg_sz;
	u32		idx_hop_num;
	u32		eqe_ba_pg_sz;
	u32		eqe_buf_pg_sz;
	u32		eqe_hop_num;
	u32		gmv_entry_num;
	u32		gmv_entry_sz;
	u32		gmv_ba_pg_sz;
	u32		gmv_buf_pg_sz;
	u32		gmv_hop_num;
	u32		sl_num;
	u32		tsq_buf_pg_sz;
	u32		tpq_buf_pg_sz;
	u32		chunk_sz;	/* chunk size in non multihop mode */
	u64		flags;
	u16		default_ceq_max_cnt;
	u16		default_ceq_period;
	u16		default_aeq_max_cnt;
	u16		default_aeq_period;
	u16		default_aeq_arm_st;
	u16		default_ceq_arm_st;
};

struct hns_roce_dfx_hw {
	int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn,
			      int *buffer);
};

enum hns_roce_device_state {
	HNS_ROCE_DEVICE_STATE_INITED,
	HNS_ROCE_DEVICE_STATE_RST_DOWN,
	HNS_ROCE_DEVICE_STATE_UNINIT,
};

struct hns_roce_hw {
	int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
	int (*cmq_init)(struct hns_roce_dev *hr_dev);
	void (*cmq_exit)(struct hns_roce_dev *hr_dev);
	int (*hw_profile)(struct hns_roce_dev *hr_dev);
	int (*hw_init)(struct hns_roce_dev *hr_dev);
	void (*hw_exit)(struct hns_roce_dev *hr_dev);
	int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
			 u64 out_param, u32 in_modifier, u8 op_modifier, u16 op,
			 u16 token, int event);
	int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned int timeout);
	int (*rst_prc_mbox)(struct hns_roce_dev *hr_dev);
	int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
		       const union ib_gid *gid, const struct ib_gid_attr *attr);
	int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
	void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
			enum ib_mtu mtu);
	int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
			  struct hns_roce_mr *mr, unsigned long mtpt_idx);
	int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
				struct hns_roce_mr *mr, int flags,
				void *mb_buf);
	int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
			       struct hns_roce_mr *mr);
	int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
	void (*write_cqc)(struct hns_roce_dev *hr_dev,
			  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
			  dma_addr_t dma_handle);
	int (*set_hem)(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, int obj, int step_idx);
	int (*clear_hem)(struct hns_roce_dev *hr_dev,
			 struct hns_roce_hem_table *table, int obj,
			 int step_idx);
	int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
	int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			 int attr_mask, enum ib_qp_state cur_state,
			 enum ib_qp_state new_state);
	int (*destroy_qp)(struct ib_qp *ibqp, struct ib_udata *udata);
	int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
			 struct hns_roce_qp *hr_qp);
	int (*post_send)(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			 const struct ib_send_wr **bad_wr);
	int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
			 const struct ib_recv_wr **bad_recv_wr);
	int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
	int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
	int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
			struct ib_udata *udata);
	int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
	int (*init_eq)(struct hns_roce_dev *hr_dev);
	void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
	int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
	int (*modify_srq)(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
		       enum ib_srq_attr_mask srq_attr_mask,
		       struct ib_udata *udata);
	int (*query_srq)(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
	int (*post_srq_recv)(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr);
	const struct ib_device_ops *hns_roce_dev_ops;
	const struct ib_device_ops *hns_roce_dev_srq_ops;
};

struct hns_roce_dev {
	struct ib_device	ib_dev;
	struct platform_device  *pdev;
	struct pci_dev		*pci_dev;
	struct device		*dev;
	struct hns_roce_uar     priv_uar;
	const char		*irq_names[HNS_ROCE_MAX_IRQ_NUM];
	spinlock_t		sm_lock;
	spinlock_t		bt_cmd_lock;
	bool			active;
	bool			is_reset;
	bool			dis_db;
	unsigned long		reset_cnt;
	struct hns_roce_ib_iboe iboe;
	enum hns_roce_device_state state;
	struct list_head	qp_list; /* list of all qps on this dev */
	spinlock_t		qp_list_lock; /* protect qp_list */
	struct list_head        pgdir_list;
	struct mutex            pgdir_mutex;
	int			irq[HNS_ROCE_MAX_IRQ_NUM];
	u8 __iomem		*reg_base;
	void __iomem		*mem_base;
	struct hns_roce_caps	caps;
	struct xarray		qp_table_xa;
	unsigned char	dev_addr[HNS_ROCE_MAX_PORTS][ETH_ALEN];
	u64			sys_image_guid;
	u32                     vendor_id;
	u32                     vendor_part_id;
	u32                     hw_rev;
	void __iomem            *priv_addr;

	struct hns_roce_cmdq	cmd;
	struct hns_roce_bitmap    pd_bitmap;
	struct hns_roce_bitmap xrcd_bitmap;
	struct hns_roce_uar_table uar_table;
	struct hns_roce_mr_table  mr_table;
	struct hns_roce_cq_table  cq_table;
	struct hns_roce_srq_table srq_table;
	struct hns_roce_qp_table  qp_table;
	struct hns_roce_eq_table  eq_table;
	struct hns_roce_hem_table  qpc_timer_table;
	struct hns_roce_hem_table  cqc_timer_table;
	/* GMV is the memory area that the driver allocates for the hardware
	 * to store SGID, SMAC and VLAN information.
	 */
	struct hns_roce_hem_table  gmv_table;

	int			cmd_mod;
	int			loop_idc;
	u32			sdb_offset;
	u32			odb_offset;
	dma_addr_t		tptr_dma_addr;	/* only for hw v1 */
	u32			tptr_size;	/* only for hw v1 */
	const struct hns_roce_hw *hw;
	void			*priv;
	struct workqueue_struct *irq_workq;
	const struct hns_roce_dfx_hw *dfx;
};

static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
	return container_of(ib_dev, struct hns_roce_dev, ib_dev);
}

static inline struct hns_roce_ucontext
			*to_hr_ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
}

static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct hns_roce_pd, ibpd);
}

static inline struct hns_roce_xrcd *to_hr_xrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct hns_roce_xrcd, ibxrcd);
}

static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct hns_roce_ah, ibah);
}

static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct hns_roce_mr, ibmr);
}

static inline struct hns_roce_mw *to_hr_mw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct hns_roce_mw, ibmw);
}

static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct hns_roce_qp, ibqp);
}

static inline struct hns_roce_cq *to_hr_cq(struct ib_cq *ib_cq)
{
	return container_of(ib_cq, struct hns_roce_cq, ib_cq);
}

static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}

static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
{
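	/* Issue both 32-bit doorbell words as one 64-bit write so the
	 * device never observes a half-updated doorbell.
	 */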
	writeq(*(u64 *)val, dest);
}

static inline struct hns_roce_qp
	*__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
{
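	/* num_qps is a power of two, so this mask keeps the low-order QPN
	 * bits that index qp_table_xa.
	 */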
	return xa_load(&hr_dev->qp_table_xa, qpn & (hr_dev->caps.num_qps - 1));
}

static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf,
					unsigned int offset)
{
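	/* The buffer is carved into trunks: the high bits of @offset select
	 * the trunk and the low trunk_shift bits locate the byte within it.
	 */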
	return (char *)(buf->trunk_list[offset >> buf->trunk_shift].buf) +
			(offset & ((1 << buf->trunk_shift) - 1));
}

static inline dma_addr_t hns_roce_buf_page(struct hns_roce_buf *buf, u32 idx)
{
	unsigned int offset = idx << buf->page_shift;

	return buf->trunk_list[offset >> buf->trunk_shift].map +
			(offset & ((1 << buf->trunk_shift) - 1));
}

#define hr_hw_page_align(x)		ALIGN(x, 1 << HNS_HW_PAGE_SHIFT)
static inline u64 to_hr_hw_page_addr(u64 addr)
{
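	/* The device addresses memory in fixed 4K HW pages regardless of
	 * the kernel PAGE_SIZE, so the low HNS_HW_PAGE_SHIFT bits are
	 * dropped.
	 */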
	return addr >> HNS_HW_PAGE_SHIFT;
}

static inline u32 to_hr_hw_page_shift(u32 page_shift)
{
	return page_shift - HNS_HW_PAGE_SHIFT;
}

static inline u32 to_hr_hem_hopnum(u32 hopnum, u32 count)
{
	if (count > 0)
		return hopnum == HNS_ROCE_HOP_NUM_0 ? 0 : hopnum;

	return 0;
}

static inline u32 to_hr_hem_entries_size(u32 count, u32 buf_shift)
{
	return hr_hw_page_align(count << buf_shift);
}

static inline u32 to_hr_hem_entries_count(u32 count, u32 buf_shift)
{
	return hr_hw_page_align(count << buf_shift) >> buf_shift;
}

static inline u32 to_hr_hem_entries_shift(u32 count, u32 buf_shift)
{
	if (!count)
		return 0;

	return ilog2(to_hr_hem_entries_count(count, buf_shift));
}
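
/*
 * Worked example: 512 CQEs of 32 bytes (buf_shift = 5) give
 * to_hr_hem_entries_size() = hr_hw_page_align(512 << 5) = 16K,
 * to_hr_hem_entries_count() = 16K >> 5 = 512 and
 * to_hr_hem_entries_shift() = ilog2(512) = 9.
 */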

#define DSCP_SHIFT 2

static inline u8 get_tclass(const struct ib_global_route *grh)
{
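	/* RoCEv2 (UDP encap) carries the DSCP in the upper 6 bits of the
	 * traffic class (the low 2 bits are ECN), e.g. 0xa0 -> DSCP 0x28,
	 * while RoCE v1 uses the raw traffic class.
	 */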
	return grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP ?
	       grh->traffic_class >> DSCP_SHIFT : grh->traffic_class;
}

int hns_roce_init_uar_table(struct hns_roce_dev *dev);
int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
void hns_roce_uar_free(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
void hns_roce_cleanup_uar_table(struct hns_roce_dev *dev);

int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
			u64 out_param);
int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);

/* The hns roce HW needs the addresses of both the current MTT block and the next one */
#define MTT_MIN_COUNT	 2
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		      int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			struct hns_roce_buf_attr *buf_attr,
			unsigned int page_shift, struct ib_udata *udata,
			unsigned long user_addr);
void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
			  struct hns_roce_mtr *mtr);
int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		     dma_addr_t *pages, unsigned int page_cnt);
int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);

void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_xrcd_table(struct hns_roce_dev *hr_dev);

int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
			 int rr);
int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
			 u32 reserved_bot, u32 reserved_top);
void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap);
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);
int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
				int align, unsigned long *obj);
void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
				unsigned long obj, int cnt,
				int rr);
int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		       struct ib_udata *udata);
int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
static inline int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags)
{
	return 0;
}
int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata);
struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
				     u64 length, u64 virt_addr,
				     int mr_access_flags, struct ib_pd *pd,
				     struct ib_udata *udata);
struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				u32 max_num_sg);
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		       unsigned int *sg_offset);
int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
			    struct hns_roce_cmd_mailbox *mailbox,
			    unsigned long mpt_index);
unsigned long key_to_hw_index(u32 key);
int hns_roce_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int hns_roce_dealloc_mw(struct ib_mw *ibmw);

void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf);
struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
					u32 page_shift, u32 flags);
int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, int start, struct hns_roce_buf *buf);
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, int start, struct ib_umem *umem,
			   unsigned int page_shift);
int hns_roce_create_srq(struct ib_srq *srq,
			struct ib_srq_init_attr *srq_init_attr,
			struct ib_udata *udata);
int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
			enum ib_srq_attr_mask srq_attr_mask,
			struct ib_udata *udata);
int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);

struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata);
void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
			  struct ib_cq *ib_cq);
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
		       struct hns_roce_cq *recv_cq);
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq);
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			 struct ib_udata *udata);
__be32 send_ieth(const struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);

int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
		       struct ib_udata *udata);
int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
int hns_roce_db_map_user(struct hns_roce_ucontext *context,
			 struct ib_udata *udata, unsigned long virt,
			 struct hns_roce_db *db);
void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
			    struct hns_roce_db *db);
int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
		      int order);
void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);

void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
			       struct ib_cq *ib_cq);
#endif /* _HNS_ROCE_DEVICE_H */