/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _HNS_ROCE_DEVICE_H
#define _HNS_ROCE_DEVICE_H

#include <rdma/ib_verbs.h>
#include <rdma/hns-abi.h>
#include "hns_roce_bond.h"

#define PCI_REVISION_ID_HIP08			0x21
#define PCI_REVISION_ID_HIP09			0x30

#define HNS_ROCE_MAX_MSG_LEN			0x80000000

#define HNS_ROCE_IB_MIN_SQ_STRIDE		6

#define BA_BYTE_LEN				8

#define HNS_ROCE_MIN_CQE_NUM			0x40
#define HNS_ROCE_MIN_SRQ_WQE_NUM		1

#define HNS_ROCE_MAX_IRQ_NUM			128

#define HNS_ROCE_SGE_IN_WQE			2
#define HNS_ROCE_SGE_SHIFT			4

#define EQ_ENABLE				1
#define EQ_DISABLE				0

#define HNS_ROCE_CEQ				0
#define HNS_ROCE_AEQ				1

#define HNS_ROCE_CEQE_SIZE 0x4
#define HNS_ROCE_AEQE_SIZE 0x10

#define HNS_ROCE_V3_EQE_SIZE 0x40

#define HNS_ROCE_V2_CQE_SIZE 32
#define HNS_ROCE_V3_CQE_SIZE 64

#define HNS_ROCE_V2_QPC_SZ 256
#define HNS_ROCE_V3_QPC_SZ 512

#define HNS_ROCE_MAX_PORTS			6
#define HNS_ROCE_GID_SIZE			16
#define HNS_ROCE_SGE_SIZE			16
#define HNS_ROCE_DWQE_SIZE			65536

#define HNS_ROCE_HOP_NUM_0			0xff

#define MR_TYPE_MR				0x00
#define MR_TYPE_FRMR				0x01
#define MR_TYPE_DMA				0x03

#define HNS_ROCE_FRMR_MAX_PA			512

#define PKEY_ID					0xffff
#define NODE_DESC_SIZE				64
#define DB_REG_OFFSET				0x1000

/* Configured to HW for PAGE_SIZE larger than 4KB */
#define PG_SHIFT_OFFSET				(PAGE_SHIFT - 12)
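/*
 * Example: on a kernel built with 64KB pages (PAGE_SHIFT = 16),
 * PG_SHIFT_OFFSET is 16 - 12 = 4, i.e. one kernel page covers 2^4 of the
 * hardware's 4KB pages.
 */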

#define HNS_ROCE_IDX_QUE_ENTRY_SZ		4
#define SRQ_DB_REG				0x230

#define HNS_ROCE_QP_BANK_NUM 8
#define HNS_ROCE_CQ_BANK_NUM 4

#define CQ_BANKID_SHIFT 2
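/*
 * Illustrative reading of the bank scheme: CQs are spread across
 * HNS_ROCE_CQ_BANK_NUM banks, and CQ_BANKID_SHIFT suggests the bank id sits
 * in the low 2 bits of a CQN while cqn >> CQ_BANKID_SHIFT indexes within a
 * bank; the authoritative mapping lives in the CQ allocation code.
 */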

enum {
	SERV_TYPE_RC,
	SERV_TYPE_UC,
	SERV_TYPE_RD,
	SERV_TYPE_UD,
	SERV_TYPE_XRC = 5,
};

enum hns_roce_event {
	HNS_ROCE_EVENT_TYPE_PATH_MIG                  = 0x01,
	HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED           = 0x02,
	HNS_ROCE_EVENT_TYPE_COMM_EST                  = 0x03,
	HNS_ROCE_EVENT_TYPE_SQ_DRAINED                = 0x04,
	HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR            = 0x05,
	HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR    = 0x06,
	HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR     = 0x07,
	HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH           = 0x08,
	HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH        = 0x09,
	HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR           = 0x0a,
	HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR           = 0x0b,
	HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW               = 0x0c,
	HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID             = 0x0d,
	HNS_ROCE_EVENT_TYPE_PORT_CHANGE               = 0x0f,
	/* 0x10 and 0x11 are unused in the current application case */
	HNS_ROCE_EVENT_TYPE_DB_OVERFLOW               = 0x12,
	HNS_ROCE_EVENT_TYPE_MB                        = 0x13,
	HNS_ROCE_EVENT_TYPE_FLR			      = 0x15,
	HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION	      = 0x16,
	HNS_ROCE_EVENT_TYPE_INVALID_XRCETH	      = 0x17,
};

enum {
	HNS_ROCE_CAP_FLAG_REREG_MR		= BIT(0),
	HNS_ROCE_CAP_FLAG_ROCE_V1_V2		= BIT(1),
	/* discard this bit, reserved for compatibility */
	HNS_ROCE_CAP_FLAG_DISCARD		= BIT(2),
	HNS_ROCE_CAP_FLAG_CQ_RECORD_DB		= BIT(3),
	HNS_ROCE_CAP_FLAG_QP_RECORD_DB		= BIT(4),
	HNS_ROCE_CAP_FLAG_SRQ			= BIT(5),
	HNS_ROCE_CAP_FLAG_XRC			= BIT(6),
	HNS_ROCE_CAP_FLAG_MW			= BIT(7),
	HNS_ROCE_CAP_FLAG_FRMR                  = BIT(8),
	HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL		= BIT(9),
	HNS_ROCE_CAP_FLAG_ATOMIC		= BIT(10),
	HNS_ROCE_CAP_FLAG_DIRECT_WQE		= BIT(12),
	HNS_ROCE_CAP_FLAG_SDI_MODE		= BIT(14),
	HNS_ROCE_CAP_FLAG_DCA_MODE		= BIT(15),
	HNS_ROCE_CAP_FLAG_STASH			= BIT(17),
	HNS_ROCE_CAP_FLAG_CQE_INLINE		= BIT(19),
	HNS_ROCE_CAP_FLAG_RQ_INLINE		= BIT(20),
	HNS_ROCE_CAP_FLAG_BOND			= BIT(21),
};

#define HNS_ROCE_DB_TYPE_COUNT			2
#define HNS_ROCE_DB_UNIT_SIZE			4

enum {
	HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
};
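/*
 * e.g. with 4KB pages and HNS_ROCE_DB_UNIT_SIZE (4-byte) records, a doorbell
 * page holds HNS_ROCE_DB_PER_PAGE = 1024 records.
 */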

enum hns_roce_reset_stage {
	HNS_ROCE_STATE_NON_RST,
	HNS_ROCE_STATE_RST_BEF_DOWN,
	HNS_ROCE_STATE_RST_DOWN,
	HNS_ROCE_STATE_RST_UNINIT,
	HNS_ROCE_STATE_RST_INIT,
	HNS_ROCE_STATE_RST_INITED,
};

enum hns_roce_instance_state {
	HNS_ROCE_STATE_NON_INIT,
	HNS_ROCE_STATE_INIT,
	HNS_ROCE_STATE_INITED,
	HNS_ROCE_STATE_UNINIT,
};

enum {
	HNS_ROCE_RST_DIRECT_RETURN		= 0,
};

#define HNS_ROCE_CMD_SUCCESS			1

#define HNS_ROCE_MAX_HOP_NUM			3

/* The minimum page size is 4K for hardware */
#define HNS_HW_PAGE_SHIFT			12
#define HNS_HW_PAGE_SIZE			(1 << HNS_HW_PAGE_SHIFT)

struct hns_roce_uar {
	u64		pfn;
	unsigned long	index;
	unsigned long	logic_idx;
};

enum hns_roce_mmap_type {
	HNS_ROCE_MMAP_TYPE_DB = 1,
	HNS_ROCE_MMAP_TYPE_DWQE,
};

struct hns_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	enum hns_roce_mmap_type mmap_type;
	u64 address;
};

struct hns_roce_dca_ctx {
	struct list_head pool; /* all DCA mems link to @pool */
	spinlock_t pool_lock; /* protect @pool */
	unsigned int free_mems; /* free mem num in pool */
	size_t free_size; /* free mem size in pool */
	size_t total_size; /* total size in pool */
};

struct hns_roce_ucontext {
	struct ib_ucontext	ibucontext;
	struct hns_roce_uar	uar;
	struct list_head	page_list;
	struct mutex		page_mutex;
	struct hns_user_mmap_entry *db_mmap_entry;
	u32			config;
	struct hns_roce_dca_ctx	dca_ctx;
};

struct hns_roce_pd {
	struct ib_pd		ibpd;
	unsigned long		pdn;
};

struct hns_roce_xrcd {
	struct ib_xrcd ibxrcd;
	u32 xrcdn;
};

struct hns_roce_bitmap {
	/* last bit found set (1) during bitmap traversal */
	unsigned long		last;
	unsigned long		top;
	unsigned long		max;
	unsigned long		reserved_top;
	unsigned long		mask;
	spinlock_t		lock;
	unsigned long		*table;
};

struct hns_roce_ida {
	struct ida ida;
	u32 min; /* Lowest ID to allocate.  */
	u32 max; /* Highest ID to allocate. */
};

/* For Hardware Entry Memory */
struct hns_roce_hem_table {
	/* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */
	u32		type;
	/* HEM array element num */
	unsigned long	num_hem;
	/* Single obj size */
	unsigned long	obj_size;
	unsigned long	table_chunk_size;
	int		lowmem;
	struct mutex	mutex;
	struct hns_roce_hem **hem;
	u64		**bt_l1;
	dma_addr_t	*bt_l1_dma_addr;
	u64		**bt_l0;
	dma_addr_t	*bt_l0_dma_addr;
};

struct hns_roce_buf_region {
	u32 offset; /* page offset */
	u32 count; /* page count */
	int hopnum; /* addressing hop num */
};

#define HNS_ROCE_MAX_BT_REGION	3
#define HNS_ROCE_MAX_BT_LEVEL	3
struct hns_roce_hem_list {
	struct list_head root_bt;
	/* link all bt dma mem by hop config */
	struct list_head mid_bt[HNS_ROCE_MAX_BT_REGION][HNS_ROCE_MAX_BT_LEVEL];
	struct list_head btm_bt; /* link all bottom bt in @mid_bt */
	dma_addr_t root_ba; /* pointer to the root ba table */
};

struct hns_roce_buf_attr {
	struct {
		size_t	size;  /* region size */
		int	hopnum; /* multi-hop addressing hop num */
	} region[HNS_ROCE_MAX_BT_REGION];
	unsigned int region_count; /* valid region count */
	unsigned int page_shift;  /* buffer page shift */
	unsigned int user_access; /* umem access flag */
	u64 iova;
	bool mtt_only; /* only alloc buffer-required MTT memory */
	bool adaptive; /* adaptive for page_shift and hopnum */
};

struct hns_roce_hem_cfg {
	dma_addr_t	root_ba; /* root BA table's address */
	bool		is_direct; /* addressing without BA table */
	unsigned int	ba_pg_shift; /* BA table page shift */
	unsigned int	buf_pg_shift; /* buffer page shift */
	unsigned int	buf_pg_count;  /* buffer page count */
	struct hns_roce_buf_region region[HNS_ROCE_MAX_BT_REGION];
	unsigned int	region_count;
};

/* memory translate region */
struct hns_roce_mtr {
	struct hns_roce_hem_list hem_list; /* multi-hop addressing resource */
	struct ib_umem		*umem; /* user space buffer */
	struct hns_roce_buf	*kmem; /* kernel space buffer */
	struct hns_roce_hem_cfg  hem_cfg; /* config for hardware addressing */
};

struct hns_roce_mw {
	struct ib_mw		ibmw;
	u32			pdn;
	u32			rkey;
	int			enabled; /* MW's active status */
	u32			pbl_hop_num;
	u32			pbl_ba_pg_sz;
	u32			pbl_buf_pg_sz;
};

struct hns_roce_mr {
	struct ib_mr		ibmr;
	u64			iova; /* MR's original virtual address */
	u64			size; /* Address range of MR */
	u32			key; /* Key of MR */
	u32			pd;   /* PD num of MR */
	u32			access; /* Access permission of MR */
	int			enabled; /* MR's active status */
	int			type; /* MR's register type */
	u32			pbl_hop_num; /* multi-hop number */
	struct hns_roce_mtr	pbl_mtr;
	u32			npages;
	dma_addr_t		*page_list;
};

struct hns_roce_mr_table {
	struct hns_roce_ida mtpt_ida;
	struct hns_roce_hem_table	mtpt_table;
};

struct hns_roce_wq {
	u64		*wrid;     /* Work request ID */
	spinlock_t	lock;
	u32		wqe_cnt;  /* WQE num */
	u32		max_gs;
	u32		rsv_sge;
	u32		offset;
	u32		wqe_shift; /* WQE size */
	u32		head;
	u32		tail;
	void __iomem	*db_reg;
	u32		ext_sge_cnt;
};

struct hns_roce_sge {
	unsigned int	sge_cnt; /* SGE num */
	u32		offset;
	u32		sge_shift; /* SGE size */
};

struct hns_roce_buf_list {
	void		*buf;
	dma_addr_t	map;
};

/*
 * %HNS_ROCE_BUF_DIRECT indicates that all memory must be in a contiguous
 * DMA address range.
 *
 * %HNS_ROCE_BUF_NOSLEEP indicates that the caller cannot sleep.
 *
 * %HNS_ROCE_BUF_NOFAIL indicates that the allocation fails only when the
 * allocated size is zero, even if the allocated size is smaller than the
 * required size.
 */
enum {
	HNS_ROCE_BUF_DIRECT = BIT(0),
	HNS_ROCE_BUF_NOSLEEP = BIT(1),
	HNS_ROCE_BUF_NOFAIL = BIT(2),
};

struct hns_roce_buf {
	struct hns_roce_buf_list	*trunk_list;
	u32				ntrunks;
	u32				npages;
	unsigned int			trunk_shift;
	unsigned int			page_shift;
};

struct hns_roce_db_pgdir {
	struct list_head	list;
	DECLARE_BITMAP(order0, HNS_ROCE_DB_PER_PAGE);
	DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
	unsigned long		*bits[HNS_ROCE_DB_TYPE_COUNT];
	u32			*page;
	dma_addr_t		db_dma;
};

struct hns_roce_user_db_page {
	struct list_head	list;
	struct ib_umem		*umem;
	unsigned long		user_virt;
	refcount_t		refcount;
};

struct hns_roce_db {
	u32		*db_record;
	union {
		struct hns_roce_db_pgdir *pgdir;
		struct hns_roce_user_db_page *user_page;
	} u;
	dma_addr_t	dma;
	void		*virt_addr;
	unsigned long	index;
	unsigned long	order;
};

struct hns_roce_cq {
	struct ib_cq			ib_cq;
	struct hns_roce_mtr		mtr;
	struct hns_roce_db		db;
	u32				flags;
	spinlock_t			lock;
	u32				cq_depth;
	u32				cons_index;
	u32				*set_ci_db;
	void __iomem			*db_reg;
	int				arm_sn;
	int				cqe_size;
	unsigned long			cqn;
	u32				vector;
	refcount_t			refcount;
	struct completion		free;
	struct list_head		sq_list; /* all qps on this send cq */
	struct list_head		rq_list; /* all qps on this recv cq */
	int				is_armed; /* cq is armed */
	struct list_head		node; /* all armed cqs are on a list */
};

struct hns_roce_idx_que {
	struct hns_roce_mtr		mtr;
	u32				entry_shift;
	unsigned long			*bitmap;
	u32				head;
	u32				tail;
};

struct hns_roce_srq {
	struct ib_srq		ibsrq;
	unsigned long		srqn;
	u32			wqe_cnt;
	int			max_gs;
	u32			rsv_sge;
	u32			wqe_shift;
	u32			cqn;
	u32			xrcdn;
	void __iomem		*db_reg;

	refcount_t		refcount;
	struct completion	free;

	struct hns_roce_mtr	buf_mtr;

	u64		       *wrid;
	struct hns_roce_idx_que idx_que;
	spinlock_t		lock;
	struct mutex		mutex;
	void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
};

struct hns_roce_uar_table {
	struct hns_roce_bitmap bitmap;
};

struct hns_roce_bank {
	struct ida ida;
	u32 inuse; /* Number of IDs allocated */
	u32 min; /* Lowest ID to allocate.  */
	u32 max; /* Highest ID to allocate. */
	u32 next; /* Next ID to allocate. */
};

struct hns_roce_idx_table {
	u32 *spare_idx;
	u32 head;
	u32 tail;
};

struct hns_roce_qp_table {
	struct hns_roce_hem_table	qp_table;
	struct hns_roce_hem_table	irrl_table;
	struct hns_roce_hem_table	trrl_table;
	struct hns_roce_hem_table	sccc_table;
	struct mutex			scc_mutex;
	struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
	struct mutex bank_mutex;
	struct hns_roce_idx_table	idx_table;
};

struct hns_roce_cq_table {
	struct xarray			array;
	struct hns_roce_hem_table	table;
	struct hns_roce_bank bank[HNS_ROCE_CQ_BANK_NUM];
	struct mutex			bank_mutex;
};

struct hns_roce_srq_table {
	struct hns_roce_ida		srq_ida;
	struct xarray			xa;
	struct hns_roce_hem_table	table;
};

struct hns_roce_av {
	u8 port;
	u8 gid_index;
	u8 stat_rate;
	u8 hop_limit;
	u32 flowlabel;
	u16 udp_sport;
	u8 sl;
	u8 tclass;
	u8 dgid[HNS_ROCE_GID_SIZE];
	u8 mac[ETH_ALEN];
	u16 vlan_id;
	u8 vlan_en;
};

struct hns_roce_ah {
	struct ib_ah		ibah;
	struct hns_roce_av	av;
};

struct hns_roce_cmd_context {
	struct completion	done;
	int			result;
	int			next;
	u64			out_param;
	u16			token;
	u16			busy;
};

enum hns_roce_cmdq_state {
	HNS_ROCE_CMDQ_STATE_NORMAL,
	HNS_ROCE_CMDQ_STATE_FATAL_ERR,
};

struct hns_roce_cmdq {
	struct dma_pool		*pool;
	struct semaphore	poll_sem;
	/*
	 * Event mode: protects command registration, ensuring max_cmds is not
	 * exceeded and the user's limit region is respected.
	 */
	struct semaphore	event_sem;
	int			max_cmds;
	spinlock_t		context_lock;
	int			free_head;
	struct hns_roce_cmd_context *context;
	/*
	 * Whether commands are processed in event mode; initialized to a
	 * non-zero default. Once the cmd event queue is ready, the driver can
	 * switch into event mode; when the device is closed it switches back
	 * into poll (non-event) mode.
	 */
	u8			use_events;
	enum hns_roce_cmdq_state state;
};

struct hns_roce_cmd_mailbox {
	void		       *buf;
	dma_addr_t		dma;
};

struct hns_roce_mbox_msg {
	u64 in_param;
	u64 out_param;
	u8 cmd;
	u32 tag;
	u16 token;
	u8 event_en;
};

struct hns_roce_dev;

enum {
	HNS_ROCE_FLUSH_FLAG = 0,
};

struct hns_roce_work {
	struct hns_roce_dev *hr_dev;
	struct work_struct work;
	int event_type;
	int sub_type;
	u32 queue_num;
};

struct hns_roce_qp {
	struct ib_qp		ibqp;
	struct hns_roce_wq	rq;
	struct hns_roce_db	rdb;
	struct hns_roce_db	sdb;
	unsigned long		en_flags;
	u32			doorbell_qpn;
	enum ib_sig_type	sq_signal_bits;
	struct hns_roce_wq	sq;

	struct hns_roce_mtr	mtr;

	u32			buff_size;
	struct mutex		mutex;
	u8			port;
	u8			phy_port;
	u8			sl;
	u8			resp_depth;
	u8			state;
	u32                     atomic_rd_en;
	u32			qkey;
	void			(*event)(struct hns_roce_qp *qp,
					 enum hns_roce_event event_type);
	unsigned long		qpn;

	u32			xrcdn;

	refcount_t		refcount;
	struct completion	free;

	struct hns_roce_sge	sge;
	u32			next_sge;
	enum ib_mtu		path_mtu;
	u32			max_inline_data;
	u8			free_mr_en;

	/* 0: flush needed, 1: unneeded */
	unsigned long		flush_flag;
	struct hns_roce_work	flush_work;
	struct list_head	node; /* all qps are on a list */
	struct list_head	rq_node; /* all recv qps are on a list */
	struct list_head	sq_node; /* all send qps are on a list */
	struct hns_user_mmap_entry *dwqe_mmap_entry;
	u32			config;
	u8			tc_mode;
	u8			priority;
};

struct hns_roce_ib_iboe {
	spinlock_t		lock;
	struct net_device      *netdevs[HNS_ROCE_MAX_PORTS];
	struct notifier_block	nb;
	u8			phy_port[HNS_ROCE_MAX_PORTS];
	enum ib_port_state	port_state[HNS_ROCE_MAX_PORTS];
};

struct hns_roce_ceqe {
	__le32	comp;
	__le32	rsv[15];
};

#define CEQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_ceqe, h, l)

#define CEQE_CQN CEQE_FIELD_LOC(23, 0)
#define CEQE_OWNER CEQE_FIELD_LOC(31, 31)

struct hns_roce_aeqe {
	__le32 asyn;
	union {
		struct {
			__le32 num;
			u32 rsv0;
			u32 rsv1;
		} queue_event;

		struct {
			__le64  out_param;
			__le16  token;
			u8	status;
			u8	rsv0;
		} __packed cmd;
	 } event;
	__le32 rsv[12];
};

#define AEQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_aeqe, h, l)

#define AEQE_EVENT_TYPE AEQE_FIELD_LOC(7, 0)
#define AEQE_SUB_TYPE AEQE_FIELD_LOC(15, 8)
#define AEQE_OWNER AEQE_FIELD_LOC(31, 31)
#define AEQE_EVENT_QUEUE_NUM AEQE_FIELD_LOC(55, 32)

struct hns_roce_eq {
	struct hns_roce_dev		*hr_dev;
	void __iomem			*db_reg;

	int				type_flag; /* Aeq:1 ceq:0 */
	int				eqn;
	u32				entries;
	int				eqe_size;
	int				irq;
	u32				cons_index;
	int				over_ignore;
	int				coalesce;
	int				arm_st;
	int				hop_num;
	struct hns_roce_mtr		mtr;
	u16				eq_max_cnt;
	u32				eq_period;
	int				shift;
	int				event_type;
	int				sub_type;
};

struct hns_roce_eq_table {
	struct hns_roce_eq	*eq;
};

enum cong_type {
	CONG_TYPE_DCQCN,
	CONG_TYPE_LDCP,
	CONG_TYPE_HC3,
	CONG_TYPE_DIP,
};

struct hns_roce_caps {
	u64		fw_ver;
	u8		num_ports;
	int		gid_table_len[HNS_ROCE_MAX_PORTS];
	int		pkey_table_len[HNS_ROCE_MAX_PORTS];
	int		local_ca_ack_delay;
	int		num_uars;
	u32		phy_num_uars;
	u32		max_sq_sg;
	u32		max_sq_inline;
	u32		max_rq_sg;
	u32		max_extend_sg;
	u32		num_qps;
	u32		num_pi_qps;
	u32		reserved_qps;
	int		num_qpc_timer;
	u32		num_srqs;
	u32		max_wqes;
	u32		max_srq_wrs;
	u32		max_srq_sges;
	u32		max_sq_desc_sz;
	u32		max_rq_desc_sz;
	u32		max_srq_desc_sz;
	int		max_qp_init_rdma;
	int		max_qp_dest_rdma;
	u32		num_cqs;
	u32		max_cqes;
	u32		min_cqes;
	u32		min_wqes;
	u32		reserved_cqs;
	u32		reserved_srqs;
	int		num_aeq_vectors;
	int		num_comp_vectors;
	int		num_other_vectors;
	u32		num_mtpts;
	u32		num_mtt_segs;
	u32		num_srqwqe_segs;
	u32		num_idx_segs;
	int		reserved_mrws;
	int		reserved_uars;
	int		num_pds;
	int		reserved_pds;
	u32		num_xrcds;
	u32		reserved_xrcds;
	u32		mtt_entry_sz;
	u32		cqe_sz;
	u32		page_size_cap;
	u32		reserved_lkey;
	int		mtpt_entry_sz;
	int		qpc_sz;
	int		irrl_entry_sz;
	int		trrl_entry_sz;
	int		cqc_entry_sz;
	int		sccc_sz;
	int		qpc_timer_entry_sz;
	int		cqc_timer_entry_sz;
	int		srqc_entry_sz;
	int		idx_entry_sz;
	u32		pbl_ba_pg_sz;
	u32		pbl_buf_pg_sz;
	u32		pbl_hop_num;
	int		aeqe_depth;
	int		ceqe_depth;
	u32		aeqe_size;
	u32		ceqe_size;
	enum ib_mtu	max_mtu;
	u32		qpc_bt_num;
	u32		qpc_timer_bt_num;
	u32		srqc_bt_num;
	u32		cqc_bt_num;
	u32		cqc_timer_bt_num;
	u32		mpt_bt_num;
	u32		eqc_bt_num;
	u32		smac_bt_num;
	u32		sgid_bt_num;
	u32		sccc_bt_num;
	u32		gmv_bt_num;
	u32		qpc_ba_pg_sz;
	u32		qpc_buf_pg_sz;
	u32		qpc_hop_num;
	u32		srqc_ba_pg_sz;
	u32		srqc_buf_pg_sz;
	u32		srqc_hop_num;
	u32		cqc_ba_pg_sz;
	u32		cqc_buf_pg_sz;
	u32		cqc_hop_num;
	u32		mpt_ba_pg_sz;
	u32		mpt_buf_pg_sz;
	u32		mpt_hop_num;
	u32		mtt_ba_pg_sz;
	u32		mtt_buf_pg_sz;
	u32		mtt_hop_num;
	u32		wqe_sq_hop_num;
	u32		wqe_sge_hop_num;
	u32		wqe_rq_hop_num;
	u32		sccc_ba_pg_sz;
	u32		sccc_buf_pg_sz;
	u32		sccc_hop_num;
	u32		qpc_timer_ba_pg_sz;
	u32		qpc_timer_buf_pg_sz;
	u32		qpc_timer_hop_num;
	u32		cqc_timer_ba_pg_sz;
	u32		cqc_timer_buf_pg_sz;
	u32		cqc_timer_hop_num;
	u32		cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */
	u32		cqe_buf_pg_sz;
	u32		cqe_hop_num;
	u32		srqwqe_ba_pg_sz;
	u32		srqwqe_buf_pg_sz;
	u32		srqwqe_hop_num;
	u32		idx_ba_pg_sz;
	u32		idx_buf_pg_sz;
	u32		idx_hop_num;
	u32		eqe_ba_pg_sz;
	u32		eqe_buf_pg_sz;
	u32		eqe_hop_num;
	u32		gmv_entry_num;
	u32		gmv_entry_sz;
	u32		gmv_ba_pg_sz;
	u32		gmv_buf_pg_sz;
	u32		gmv_hop_num;
	u32		sl_num;
	u32		llm_buf_pg_sz;
	u32		chunk_sz; /* chunk size in non multihop mode */
	u64		flags;
	u16		default_ceq_max_cnt;
	u16		default_ceq_period;
	u16		default_aeq_max_cnt;
	u16		default_aeq_period;
	u16		default_aeq_arm_st;
	u16		default_ceq_arm_st;
	enum cong_type	cong_type;
};

enum hns_roce_device_state {
	HNS_ROCE_DEVICE_STATE_INITED,
	HNS_ROCE_DEVICE_STATE_RST_DOWN,
	HNS_ROCE_DEVICE_STATE_UNINIT,
};

struct hns_roce_hw {
	int (*cmq_init)(struct hns_roce_dev *hr_dev);
	void (*cmq_exit)(struct hns_roce_dev *hr_dev);
	int (*hw_profile)(struct hns_roce_dev *hr_dev);
	int (*hw_init)(struct hns_roce_dev *hr_dev);
	void (*hw_exit)(struct hns_roce_dev *hr_dev);
	int (*post_mbox)(struct hns_roce_dev *hr_dev,
			 struct hns_roce_mbox_msg *mbox_msg);
	int (*poll_mbox_done)(struct hns_roce_dev *hr_dev);
	bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy);
	int (*set_gid)(struct hns_roce_dev *hr_dev, int gid_index,
		       const union ib_gid *gid, const struct ib_gid_attr *attr);
	int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port,
		       const u8 *addr);
	int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
			  struct hns_roce_mr *mr);
	int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
				struct hns_roce_mr *mr, int flags,
				void *mb_buf);
	int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
			       struct hns_roce_mr *mr);
	int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
	void (*write_cqc)(struct hns_roce_dev *hr_dev,
			  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
			  dma_addr_t dma_handle);
	int (*set_hem)(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, int obj, u32 step_idx);
	int (*clear_hem)(struct hns_roce_dev *hr_dev,
			 struct hns_roce_hem_table *table, int obj,
			 u32 step_idx);
	int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			 int attr_mask, enum ib_qp_state cur_state,
			 enum ib_qp_state new_state, struct ib_udata *udata);
	int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
			 struct hns_roce_qp *hr_qp);
	void (*dereg_mr)(struct hns_roce_dev *hr_dev);
	int (*init_eq)(struct hns_roce_dev *hr_dev);
	void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
	int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
	int (*query_cqc)(struct hns_roce_dev *hr_dev, u32 cqn, void *buffer);
	int (*query_qpc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
	int (*query_mpt)(struct hns_roce_dev *hr_dev, u32 key, void *buffer);
	int (*get_dscp)(struct hns_roce_dev *hr_dev, u8 dscp,
			u8 *tc_mode, u8 *priority);
	const struct ib_device_ops *hns_roce_dev_ops;
	const struct ib_device_ops *hns_roce_dev_srq_ops;
	int (*bond_init)(struct hns_roce_dev *hr_dev);
	bool (*bond_is_active)(struct hns_roce_dev *hr_dev);
	struct net_device *(*get_bond_netdev)(struct hns_roce_dev *hr_dev);
};

struct hns_roce_dev {
	struct ib_device	ib_dev;
	struct pci_dev		*pci_dev;
	struct device		*dev;
	struct hns_roce_uar     priv_uar;
	const char		*irq_names[HNS_ROCE_MAX_IRQ_NUM];
	spinlock_t		sm_lock;
	bool			active;
	bool			is_reset;
	bool			dis_db;
	unsigned long		reset_cnt;
	struct hns_roce_ib_iboe iboe;
	enum hns_roce_device_state state;
	struct list_head	qp_list; /* list of all qps on this dev */
	spinlock_t		qp_list_lock; /* protect qp_list */
	struct list_head	dip_list; /* list of all dest ips on this dev */
	spinlock_t		dip_list_lock; /* protect dip_list */

	struct list_head        pgdir_list;
	struct mutex            pgdir_mutex;
	int			irq[HNS_ROCE_MAX_IRQ_NUM];
	u8 __iomem		*reg_base;
	void __iomem		*mem_base;
	struct hns_roce_caps	caps;
	struct xarray		qp_table_xa;

	unsigned char	dev_addr[HNS_ROCE_MAX_PORTS][ETH_ALEN];
	u64			sys_image_guid;
	u32                     vendor_id;
	u32                     vendor_part_id;
	u32                     hw_rev;
	void __iomem            *priv_addr;

	struct hns_roce_cmdq	cmd;
	struct hns_roce_ida pd_ida;
	struct hns_roce_ida xrcd_ida;
	struct hns_roce_ida uar_ida;
	struct hns_roce_mr_table  mr_table;
	struct hns_roce_cq_table  cq_table;
	struct hns_roce_srq_table srq_table;
	struct hns_roce_qp_table  qp_table;
	struct hns_roce_eq_table  eq_table;
	struct hns_roce_hem_table  qpc_timer_table;
	struct hns_roce_hem_table  cqc_timer_table;
	/* GMV is the memory area that the driver allocates for the hardware
	 * to store SGID, SMAC and VLAN information.
	 */
	struct hns_roce_hem_table  gmv_table;

	int			cmd_mod;
	u8			mac_type;
	int			loop_idc;
	u32			sdb_offset;
	u32			odb_offset;
	const struct hns_roce_hw *hw;
	void			*priv;
	struct workqueue_struct *irq_workq;
	struct work_struct ecc_work;
	u32 func_num;
	u32 is_vf;
	u32 cong_algo_tmpl_id;
	u64 dwqe_page;

	struct notifier_block bond_nb;
	struct delayed_work bond_work;
	struct hns_roce_bond_group *bond_grp;
	struct netdev_lag_lower_state_info slave_state;
};

static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
	return container_of(ib_dev, struct hns_roce_dev, ib_dev);
}

static inline struct hns_roce_ucontext
			*to_hr_ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
}

static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct hns_roce_pd, ibpd);
}

static inline struct hns_roce_xrcd *to_hr_xrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct hns_roce_xrcd, ibxrcd);
}

static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct hns_roce_ah, ibah);
}

static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct hns_roce_mr, ibmr);
}

static inline struct hns_roce_mw *to_hr_mw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct hns_roce_mw, ibmw);
}

static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct hns_roce_qp, ibqp);
}

static inline struct hns_roce_cq *to_hr_cq(struct ib_cq *ib_cq)
{
	return container_of(ib_cq, struct hns_roce_cq, ib_cq);
}

static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}

static inline struct hns_user_mmap_entry *
to_hns_mmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry, struct hns_user_mmap_entry, rdma_entry);
}

static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
{
	writeq(*(u64 *)val, dest);
}
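/*
 * Note: the two __le32 doorbell words are issued as a single 64-bit MMIO
 * write, so the device observes both halves of the doorbell together.
 */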

static inline struct hns_roce_qp
	*__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
{
	return xa_load(&hr_dev->qp_table_xa, qpn);
}

static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf,
					unsigned int offset)
{
	return (char *)(buf->trunk_list[offset >> buf->trunk_shift].buf) +
			(offset & ((1 << buf->trunk_shift) - 1));
}
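/*
 * Worked example with illustrative values: for trunk_shift = 16 (64KB
 * trunks), offset 0x1a000 resolves to trunk_list[1] at byte 0xa000 within
 * that trunk; hns_roce_buf_dma_addr() below applies the same split to the
 * trunk's DMA address.
 */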

static inline dma_addr_t hns_roce_buf_dma_addr(struct hns_roce_buf *buf,
					       unsigned int offset)
{
	return buf->trunk_list[offset >> buf->trunk_shift].map +
			(offset & ((1 << buf->trunk_shift) - 1));
}

static inline dma_addr_t hns_roce_buf_page(struct hns_roce_buf *buf, u32 idx)
{
	return hns_roce_buf_dma_addr(buf, idx << buf->page_shift);
}

#define hr_hw_page_align(x)		ALIGN(x, 1 << HNS_HW_PAGE_SHIFT)

static inline u64 to_hr_hw_page_addr(u64 addr)
{
	return addr >> HNS_HW_PAGE_SHIFT;
}

static inline u32 to_hr_hw_page_shift(u32 page_shift)
{
	return page_shift - HNS_HW_PAGE_SHIFT;
}
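/*
 * Example: a buffer mapped with 64KB pages (page_shift = 16) is reported to
 * hardware as to_hr_hw_page_shift(16) = 4, while to_hr_hw_page_addr() turns
 * a byte address into an index of 4KB hardware pages.
 */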

static inline u32 to_hr_hem_hopnum(u32 hopnum, u32 count)
{
	if (count > 0)
		return hopnum == HNS_ROCE_HOP_NUM_0 ? 0 : hopnum;

	return 0;
}
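/*
 * Note: HNS_ROCE_HOP_NUM_0 (0xff) encodes "0-hop" addressing, so a non-empty
 * region configured with that value is reported to hardware as hop number 0;
 * an empty region (count == 0) always reports 0.
 */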

static inline u32 to_hr_hem_entries_size(u32 count, u32 buf_shift)
{
	return hr_hw_page_align(count << buf_shift);
}

static inline u32 to_hr_hem_entries_count(u32 count, u32 buf_shift)
{
	return hr_hw_page_align(count << buf_shift) >> buf_shift;
}

static inline u32 to_hr_hem_entries_shift(u32 count, u32 buf_shift)
{
	if (!count)
		return 0;

	return ilog2(to_hr_hem_entries_count(count, buf_shift));
}

#define DSCP_SHIFT 2

static inline u8 get_tclass(const struct ib_global_route *grh)
{
	return grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP ?
	       grh->traffic_class >> DSCP_SHIFT : grh->traffic_class;
}
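/*
 * Example: for a RoCEv2 (UDP-encapsulated) GID the DSCP sits in the top six
 * bits of traffic_class, so traffic_class 0xb8 yields DSCP 0x2e (EF); other
 * GID types use traffic_class unchanged.
 */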

static inline u8 to_rdma_port_num(u8 phy_port_num)
{
	return phy_port_num + 1;
}

static inline enum ib_port_state get_port_state(struct net_device *net_dev)
{
	return (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
		IB_PORT_ACTIVE : IB_PORT_DOWN;
}

void hns_roce_init_uar_table(struct hns_roce_dev *dev);
int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);

int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
			u64 out_param);
int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);

/* hns roce hw needs the current block and next block addr from mtt */
#define MTT_MIN_COUNT	 2
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		      u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			struct hns_roce_buf_attr *buf_attr,
			unsigned int page_shift, struct ib_udata *udata,
			unsigned long user_addr);
void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
			  struct hns_roce_mtr *mtr);
int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		     dma_addr_t *pages, unsigned int page_cnt);

void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);

void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);

void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);

int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		       struct ib_udata *udata);
int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
static inline int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags)
{
	return 0;
}

int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata);
int hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length,
			   u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
			   struct ib_udata *udata);
struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				u32 max_num_sg);
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		       unsigned int *sg_offset);
int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
unsigned long key_to_hw_index(u32 key);

int hns_roce_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int hns_roce_dealloc_mw(struct ib_mw *ibmw);

void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf);
struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
					u32 page_shift, u32 flags);

int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, struct hns_roce_buf *buf,
			   unsigned int page_shift);
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, struct ib_umem *umem,
			   unsigned int page_shift);

int hns_roce_create_srq(struct ib_srq *srq,
			struct ib_srq_init_attr *srq_init_attr,
			struct ib_udata *udata);
int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
			enum ib_srq_attr_mask srq_attr_mask,
			struct ib_udata *udata);
int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);

int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);

struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata);
void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
			  struct ib_cq *ib_cq);
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
		       struct hns_roce_cq *recv_cq);
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq);
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			 struct ib_udata *udata);
__be32 send_ieth(const struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);

int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
		       struct ib_udata *udata);

int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
			 struct hns_roce_db *db);
void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
			    struct hns_roce_db *db);
int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
		      int order);
void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);

void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq);
int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq);
int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp);
int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp);
int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr);
int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr);
struct hns_user_mmap_entry *
hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
				size_t length,
				enum hns_roce_mmap_type mmap_type);
#endif /* _HNS_ROCE_DEVICE_H */