/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _HNS_ROCE_DEVICE_H
#define _HNS_ROCE_DEVICE_H

#include <rdma/ib_verbs.h>
#include <rdma/hns-abi.h>

#include "hns_roce_bond.h"

#define PCI_REVISION_ID_HIP08			0x21
#define PCI_REVISION_ID_HIP09			0x30

#define HNS_ROCE_MAX_MSG_LEN			0x80000000

#define HNS_ROCE_IB_MIN_SQ_STRIDE		6

#define BA_BYTE_LEN				8

#define HNS_ROCE_MIN_CQE_NUM			0x40
#define HNS_ROCE_MIN_SRQ_WQE_NUM		1

#define HNS_ROCE_MAX_IRQ_NUM			128

#define HNS_ROCE_SGE_IN_WQE			2
#define HNS_ROCE_SGE_SHIFT			4

#define EQ_ENABLE				1
#define EQ_DISABLE				0

#define HNS_ROCE_CEQ				0
#define HNS_ROCE_AEQ				1
#define HNS_ROCE_IS_RESETTING			1

#define HNS_ROCE_CEQE_SIZE 0x4
#define HNS_ROCE_AEQE_SIZE 0x10

#define HNS_ROCE_V3_EQE_SIZE 0x40

#define HNS_ROCE_V2_CQE_SIZE 32
#define HNS_ROCE_V3_CQE_SIZE 64

#define HNS_ROCE_V2_QPC_SZ 256
#define HNS_ROCE_V3_QPC_SZ 512

#define HNS_ROCE_MAX_PORTS			6
#define HNS_ROCE_GID_SIZE			16
#define HNS_ROCE_SGE_SIZE			16
#define HNS_ROCE_DWQE_SIZE			65536

#define HNS_ROCE_HOP_NUM_0			0xff

#define MR_TYPE_MR				0x00
#define MR_TYPE_FRMR				0x01
#define MR_TYPE_DMA				0x03

#define HNS_ROCE_FRMR_MAX_PA			512

#define PKEY_ID					0xffff
#define NODE_DESC_SIZE				64
#define DB_REG_OFFSET				0x1000

/* Configure to HW for PAGE_SIZE larger than 4KB */
#define PG_SHIFT_OFFSET				(PAGE_SHIFT - 12)
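
/*
 * Worked example (illustrative): with 64KB kernel pages PAGE_SHIFT is 16,
 * so PG_SHIFT_OFFSET is 4 and the hardware, which counts pages in 4KB
 * units, sees 2^4 * 4KB = 64KB pages.
 */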

#define HNS_ROCE_IDX_QUE_ENTRY_SZ		4
#define SRQ_DB_REG				0x230

#define HNS_ROCE_QP_BANK_NUM 8
#define HNS_ROCE_CQ_BANK_NUM 4

#define CQ_BANKID_SHIFT 2
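
/*
 * QPs and CQs are allocated out of HNS_ROCE_QP_BANK_NUM and
 * HNS_ROCE_CQ_BANK_NUM ID banks to improve concurrency; with
 * CQ_BANKID_SHIFT = 2 the bank id would occupy the low two bits of a CQN
 * (e.g. CQN 0x9 -> bank 1). This is an inference from the macro names,
 * not a statement of the hardware interface.
 */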

enum {
	SERV_TYPE_RC,
	SERV_TYPE_UC,
	SERV_TYPE_RD,
	SERV_TYPE_UD,
	SERV_TYPE_XRC = 5,
};

enum hns_roce_event {
	HNS_ROCE_EVENT_TYPE_PATH_MIG                  = 0x01,
	HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED           = 0x02,
	HNS_ROCE_EVENT_TYPE_COMM_EST                  = 0x03,
	HNS_ROCE_EVENT_TYPE_SQ_DRAINED                = 0x04,
	HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR            = 0x05,
	HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR    = 0x06,
	HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR     = 0x07,
	HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH           = 0x08,
	HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH        = 0x09,
	HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR           = 0x0a,
	HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR           = 0x0b,
	HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW               = 0x0c,
	HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID             = 0x0d,
	HNS_ROCE_EVENT_TYPE_PORT_CHANGE               = 0x0f,
	/* 0x10 and 0x11 are unused in current application cases */
	HNS_ROCE_EVENT_TYPE_DB_OVERFLOW               = 0x12,
	HNS_ROCE_EVENT_TYPE_MB                        = 0x13,
	HNS_ROCE_EVENT_TYPE_FLR			      = 0x15,
	HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION	      = 0x16,
	HNS_ROCE_EVENT_TYPE_INVALID_XRCETH	      = 0x17,
};

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the hns driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */
enum hns_roce_qp_create_flags {
	HNS_ROCE_QP_CREATE_DCA_EN = IB_QP_CREATE_RESERVED_START,
};

enum {
	HNS_ROCE_CAP_FLAG_REREG_MR		= BIT(0),
	HNS_ROCE_CAP_FLAG_ROCE_V1_V2		= BIT(1),
	/* discard this bit, reserved for compatibility */
	HNS_ROCE_CAP_FLAG_DISCARD		= BIT(2),
	HNS_ROCE_CAP_FLAG_CQ_RECORD_DB		= BIT(3),
	HNS_ROCE_CAP_FLAG_QP_RECORD_DB		= BIT(4),
	HNS_ROCE_CAP_FLAG_SRQ			= BIT(5),
	HNS_ROCE_CAP_FLAG_XRC			= BIT(6),
	HNS_ROCE_CAP_FLAG_MW			= BIT(7),
	HNS_ROCE_CAP_FLAG_FRMR                  = BIT(8),
	HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL		= BIT(9),
	HNS_ROCE_CAP_FLAG_ATOMIC		= BIT(10),
	HNS_ROCE_CAP_FLAG_DIRECT_WQE		= BIT(12),
	HNS_ROCE_CAP_FLAG_SVE_DIRECT_WQE	= BIT(13),
	HNS_ROCE_CAP_FLAG_SDI_MODE		= BIT(14),
	HNS_ROCE_CAP_FLAG_DCA_MODE		= BIT(15),
	HNS_ROCE_CAP_FLAG_STASH			= BIT(17),
	HNS_ROCE_CAP_FLAG_CQE_INLINE		= BIT(19),
	HNS_ROCE_CAP_FLAG_RQ_INLINE		= BIT(20),
	HNS_ROCE_CAP_FLAG_BOND			= BIT(21),
};

#define HNS_ROCE_DB_TYPE_COUNT			2
#define HNS_ROCE_DB_UNIT_SIZE			4

enum {
	HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
};
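
/*
 * E.g. on a kernel with 4KB pages a doorbell page holds
 * PAGE_SIZE / 4 = 1024 records of HNS_ROCE_DB_UNIT_SIZE (4) bytes each.
 */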

enum hns_roce_reset_stage {
	HNS_ROCE_STATE_NON_RST,
	HNS_ROCE_STATE_RST_BEF_DOWN,
	HNS_ROCE_STATE_RST_DOWN,
	HNS_ROCE_STATE_RST_UNINIT,
	HNS_ROCE_STATE_RST_INIT,
	HNS_ROCE_STATE_RST_INITED,
};

enum hns_roce_instance_state {
	HNS_ROCE_STATE_NON_INIT,
	HNS_ROCE_STATE_INIT,
	HNS_ROCE_STATE_INITED,
	HNS_ROCE_STATE_UNINIT,
};

enum {
	HNS_ROCE_RST_DIRECT_RETURN		= 0,
};

#define HNS_ROCE_CMD_SUCCESS			1

#define HNS_ROCE_MAX_HOP_NUM			3

/* The minimum page size is 4K for hardware */
#define HNS_HW_PAGE_SHIFT			12
#define HNS_HW_PAGE_SIZE			(1 << HNS_HW_PAGE_SHIFT)

struct hns_roce_uar {
	u64		pfn;
	unsigned long	index;
	unsigned long	logic_idx;
};

enum hns_roce_mmap_type {
	HNS_ROCE_MMAP_TYPE_DB = 1,
	HNS_ROCE_MMAP_TYPE_DWQE,
	HNS_ROCE_MMAP_TYPE_DCA,
	HNS_ROCE_MMAP_TYPE_RESET,
};

struct hns_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	enum hns_roce_mmap_type mmap_type;
	u64 address;
};

struct hns_roce_dca_ctx {
	struct list_head pool; /* all DCA mems link to @pool */
	spinlock_t pool_lock; /* protect @pool */
	unsigned int free_mems; /* free mem num in pool */
	size_t free_size; /* free mem size in pool */
	size_t total_size; /* total size in pool */
	size_t max_size; /* max size the pool can expand to */
	size_t min_size; /* shrink if @free_size > @min_size */
	unsigned int unit_size; /* unit size per DCA mem */

	unsigned int max_qps;
	unsigned int status_npage;
	struct ida ida;

#define HNS_DCA_BITS_PER_STATUS 1
	unsigned long *buf_status;
	unsigned long *sync_status;

	bool exit_aging;
	struct list_head aging_proc_list;
	struct list_head aging_new_list;
	spinlock_t aging_lock;
	struct delayed_work aging_dwork;
	struct hns_user_mmap_entry *dca_mmap_entry;
};

struct hns_roce_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head list; /* link all uctx to uctx_list on hr_dev */
	pid_t pid; /* process id to which the uctx belongs */
	struct hns_roce_uar	uar;
	struct list_head	page_list;
	struct mutex		page_mutex;
	struct hns_user_mmap_entry *db_mmap_entry;
	struct hns_user_mmap_entry *reset_mmap_entry;
	u32			config;
	struct hns_roce_dca_ctx	dca_ctx;
	void *dca_dbgfs;
};

struct hns_roce_pd {
	struct ib_pd		ibpd;
	unsigned long		pdn;
};

struct hns_roce_xrcd {
	struct ib_xrcd ibxrcd;
	u32 xrcdn;
};

struct hns_roce_bitmap {
	/* last bit that was found set to 1 during bitmap traversal */
	unsigned long		last;
	unsigned long		top;
	unsigned long		max;
	unsigned long		reserved_top;
	unsigned long		mask;
	spinlock_t		lock;
	unsigned long		*table;
};

struct hns_roce_ida {
	struct ida ida;
	u32 min; /* Lowest ID to allocate.  */
	u32 max; /* Highest ID to allocate. */
};

/* For Hardware Entry Memory */
struct hns_roce_hem_table {
	/* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */
	u32		type;
	/* HEM array element num */
	unsigned long	num_hem;
	/* Single obj size */
	unsigned long	obj_size;
	unsigned long	table_chunk_size;
	int		lowmem;
	struct mutex	mutex;
	struct hns_roce_hem **hem;
	u64		**bt_l1;
	dma_addr_t	*bt_l1_dma_addr;
	u64		**bt_l0;
	dma_addr_t	*bt_l0_dma_addr;
};

struct hns_roce_buf_region {
	u32 offset; /* page offset */
	u32 count; /* page count */
	int hopnum; /* addressing hop num */
};

#define HNS_ROCE_MAX_BT_REGION	3
#define HNS_ROCE_MAX_BT_LEVEL	3
struct hns_roce_hem_list {
	struct list_head root_bt;
	/* link all bt dma mem by hop config */
	struct list_head mid_bt[HNS_ROCE_MAX_BT_REGION][HNS_ROCE_MAX_BT_LEVEL];
	struct list_head btm_bt; /* link all bottom bt in @mid_bt */
	dma_addr_t root_ba; /* pointer to the root ba table */
};

struct hns_roce_buf_attr {
	struct {
		size_t	size;  /* region size */
		int	hopnum; /* multi-hop addressing hop num */
	} region[HNS_ROCE_MAX_BT_REGION];
	unsigned int region_count; /* valid region count */
	unsigned int page_shift;  /* buffer page shift */
	unsigned int user_access; /* umem access flag */
	u64 iova;
	bool mtt_only; /* only alloc buffer-required MTT memory */
	bool adaptive; /* adaptive for page_shift and hopnum */
};

struct hns_roce_hem_cfg {
	dma_addr_t	root_ba; /* root BA table's address */
	bool		is_direct; /* addressing without BA table */
	unsigned int	ba_pg_shift; /* BA table page shift */
	unsigned int	buf_pg_shift; /* buffer page shift */
	unsigned int	buf_pg_count;  /* buffer page count */
	struct hns_roce_buf_region region[HNS_ROCE_MAX_BT_REGION];
	unsigned int	region_count;
};

/* memory translation region */
struct hns_roce_mtr {
	struct hns_roce_hem_list hem_list; /* multi-hop addressing resource */
	struct ib_umem		*umem; /* user space buffer */
	struct hns_roce_buf	*kmem; /* kernel space buffer */
	struct hns_roce_hem_cfg  hem_cfg; /* config for hardware addressing */
};

/* DCA config */
struct hns_roce_dca_cfg {
	spinlock_t		lock;
	u16			attach_count;
	u32			buf_id;
	u32			dcan;
	void			**buf_list;
	u32			npages;
	u32			sq_idx;
	bool			aging_enable;
	struct list_head	aging_node;
};

struct hns_roce_mw {
	struct ib_mw		ibmw;
	u32			pdn;
	u32			rkey;
	int			enabled; /* MW's active status */
	u32			pbl_hop_num;
	u32			pbl_ba_pg_sz;
	u32			pbl_buf_pg_sz;
};

struct hns_roce_mr {
	struct ib_mr		ibmr;
	u64			iova; /* MR's original virtual addr */
	u64			size; /* Address range of MR */
	u32			key; /* Key of MR */
	u32			pd;   /* PD num of MR */
	u32			access; /* Access permission of MR */
	int			enabled; /* MR's active status */
	int			type; /* MR's register type */
	u32			pbl_hop_num; /* multi-hop number */
	struct hns_roce_mtr	pbl_mtr;
	u32			npages;
	dma_addr_t		*page_list;
};

struct hns_roce_mr_table {
	struct hns_roce_ida mtpt_ida;
	struct hns_roce_hem_table	mtpt_table;
};

struct hns_roce_wq {
	u64		*wrid;     /* Work request ID */
	spinlock_t	lock;
	u32		wqe_cnt;  /* WQE num */
	u32		max_gs;
	u32		rsv_sge;
	u32		offset;
	int		wqe_offset;
	u32		wqe_shift; /* WQE size */
	u32		head;
	u32		tail;
	void __iomem	*db_reg;
	u32		ext_sge_cnt;
};

struct hns_roce_sge {
	unsigned int	sge_cnt; /* SGE num */
	u32		offset;
	u32		sge_shift; /* SGE size */
	int		wqe_offset;
};

struct hns_roce_buf_list {
	void		*buf;
	dma_addr_t	map;
};

/*
 * %HNS_ROCE_BUF_DIRECT indicates that all the memory must be in a
 * contiguous dma address range.
 *
 * %HNS_ROCE_BUF_NOSLEEP indicates that the caller cannot sleep.
 *
 * %HNS_ROCE_BUF_NOFAIL indicates that the allocation fails only when the
 * allocated size is zero, even if the allocated size is smaller than the
 * required size.
 */
enum {
	HNS_ROCE_BUF_DIRECT = BIT(0),
	HNS_ROCE_BUF_NOSLEEP = BIT(1),
	HNS_ROCE_BUF_NOFAIL = BIT(2),
};
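
/*
 * Illustrative use only (hns_roce_buf_alloc() is declared near the bottom
 * of this header): a ring that must be physically contiguous and is set up
 * from a context that cannot sleep could be requested as
 *
 *	buf = hns_roce_buf_alloc(hr_dev, size, PAGE_SHIFT,
 *				 HNS_ROCE_BUF_DIRECT | HNS_ROCE_BUF_NOSLEEP);
 */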

struct hns_roce_buf {
	struct hns_roce_buf_list	*trunk_list;
	u32				ntrunks;
	u32				npages;
	unsigned int			trunk_shift;
	unsigned int			page_shift;
};

struct hns_roce_db_pgdir {
	struct list_head	list;
	DECLARE_BITMAP(order0, HNS_ROCE_DB_PER_PAGE);
	DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
	unsigned long		*bits[HNS_ROCE_DB_TYPE_COUNT];
	u32			*page;
	dma_addr_t		db_dma;
};

struct hns_roce_user_db_page {
	struct list_head	list;
	struct ib_umem		*umem;
	unsigned long		user_virt;
	refcount_t		refcount;
};

struct hns_roce_db {
	u32		*db_record;
	union {
		struct hns_roce_db_pgdir *pgdir;
		struct hns_roce_user_db_page *user_page;
	} u;
	dma_addr_t	dma;
	void		*virt_addr;
	unsigned long	index;
	unsigned long	order;
};

struct hns_roce_cq {
	struct ib_cq			ib_cq;
	struct hns_roce_mtr		mtr;
	struct hns_roce_db		db;
	u32				flags;
	spinlock_t			lock;
	u32				cq_depth;
	u32				cons_index;
	u32				*set_ci_db;
	void __iomem			*db_reg;
	int				arm_sn;
	int				cqe_size;
	unsigned long			cqn;
	u32				vector;
	refcount_t			refcount;
	struct completion		free;
	struct list_head		sq_list; /* all qps on this send cq */
	struct list_head		rq_list; /* all qps on this recv cq */
	int				is_armed; /* cq is armed */
	struct list_head		node; /* all armed cqs are on a list */
};

struct hns_roce_idx_que {
	struct hns_roce_mtr		mtr;
	u32				entry_shift;
	unsigned long			*bitmap;
	u32				head;
	u32				tail;
};

struct hns_roce_srq {
	struct ib_srq		ibsrq;
	unsigned long		srqn;
	u32			wqe_cnt;
	int			max_gs;
	u32			rsv_sge;
	u32			wqe_shift;
	u32			cqn;
	u32			xrcdn;
	void __iomem		*db_reg;

	refcount_t		refcount;
	struct completion	free;

	struct hns_roce_mtr	buf_mtr;

	u64		       *wrid;
	struct hns_roce_idx_que idx_que;
	spinlock_t		lock;
	struct mutex		mutex;
	void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
};

struct hns_roce_uar_table {
	struct hns_roce_bitmap bitmap;
};

struct hns_roce_bank {
	struct ida ida;
	u32 inuse; /* Number of IDs allocated */
	u32 min; /* Lowest ID to allocate.  */
	u32 max; /* Highest ID to allocate. */
	u32 next; /* Next ID to allocate. */
};

struct hns_roce_idx_table {
	u32 *spare_idx;
	u32 head;
	u32 tail;
};

struct hns_roce_qp_table {
	struct hns_roce_hem_table	qp_table;
	struct hns_roce_hem_table	irrl_table;
	struct hns_roce_hem_table	trrl_table;
	struct hns_roce_hem_table	sccc_table;
	struct mutex			scc_mutex;
	struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
	struct mutex bank_mutex;
	struct hns_roce_idx_table	idx_table;
};

struct hns_roce_cq_table {
	struct xarray			array;
	struct hns_roce_hem_table	table;
	struct hns_roce_bank bank[HNS_ROCE_CQ_BANK_NUM];
	struct mutex			bank_mutex;
};

struct hns_roce_srq_table {
	struct hns_roce_ida		srq_ida;
	struct xarray			xa;
	struct hns_roce_hem_table	table;
};

struct hns_roce_av {
	u8 port;
	u8 gid_index;
	u8 stat_rate;
	u8 hop_limit;
	u32 flowlabel;
	u16 udp_sport;
	u8 sl;
	u8 tclass;
	u8 dgid[HNS_ROCE_GID_SIZE];
	u8 mac[ETH_ALEN];
	u16 vlan_id;
	u8 vlan_en;
};

struct hns_roce_ah {
	struct ib_ah		ibah;
	struct hns_roce_av	av;
};

struct hns_roce_cmd_context {
	struct completion	done;
	int			result;
	int			next;
	u64			out_param;
	u16			token;
	u16			busy;
};

enum hns_roce_cmdq_state {
	HNS_ROCE_CMDQ_STATE_NORMAL,
	HNS_ROCE_CMDQ_STATE_FATAL_ERR,
};

struct hns_roce_cmdq {
	struct dma_pool		*pool;
	struct semaphore	poll_sem;
	/*
	 * Event mode: protects command context registration, ensuring
	 * that no more than max_cmds commands are outstanding and that
	 * users stay within their limit region.
	 */
	struct semaphore	event_sem;
	int			max_cmds;
	spinlock_t		context_lock;
	int			free_head;
	struct hns_roce_cmd_context *context;
	/*
	 * Whether the process uses event mode, initialized to a non-zero
	 * default. After the event queue for cmd events is ready, the
	 * driver can switch into event mode; when the device is closed,
	 * it switches back to poll (non-event) mode.
	 */
	u8			use_events;
	enum hns_roce_cmdq_state state;
};

struct hns_roce_cmd_mailbox {
	void		       *buf;
	dma_addr_t		dma;
};

struct hns_roce_mbox_msg {
	u64 in_param;
	u64 out_param;
	u8 cmd;
	u32 tag;
	u16 token;
	u8 event_en;
};

struct hns_roce_dev;

enum {
	HNS_ROCE_FLUSH_FLAG = 0,
};

struct hns_roce_work {
	struct hns_roce_dev *hr_dev;
	struct work_struct work;
	int event_type;
	int sub_type;
	u32 queue_num;
};

struct hns_roce_qp {
	struct ib_qp		ibqp;
	struct hns_roce_wq	rq;
	struct hns_roce_db	rdb;
	struct hns_roce_db	sdb;
	unsigned long		en_flags;
	unsigned long		congest_type;
	u32			doorbell_qpn;
	enum ib_sig_type	sq_signal_bits;
	struct hns_roce_wq	sq;

	struct hns_roce_mtr	mtr;
	struct hns_roce_dca_cfg	dca_cfg;

	u32			buff_size;
	struct mutex		mutex;
	u8			port;
	u8			phy_port;
	u8			sl;
	u8			resp_depth;
	u8			state;
	u32                     atomic_rd_en;
	u32			qkey;
	void			(*event)(struct hns_roce_qp *qp,
					 enum hns_roce_event event_type);
	unsigned long		qpn;

	u32			xrcdn;

	refcount_t		refcount;
	struct completion	free;

	struct hns_roce_sge	sge;
	u32			next_sge;
	enum ib_mtu		path_mtu;
	u32			max_inline_data;
	u8			free_mr_en;

	/* 0: flush needed, 1: unneeded */
	unsigned long		flush_flag;
	struct hns_roce_work	flush_work;
	struct list_head	node; /* all qps are on a list */
	struct list_head	rq_node; /* all recv qps are on a list */
	struct list_head	sq_node; /* all send qps are on a list */
	struct hns_user_mmap_entry *dwqe_mmap_entry;
	u32			config;
	u8			tc_mode;
	u8			priority;
};

struct hns_roce_ib_iboe {
	spinlock_t		lock;
	struct net_device      *netdevs[HNS_ROCE_MAX_PORTS];
	struct notifier_block	nb;
	u8			phy_port[HNS_ROCE_MAX_PORTS];
	enum ib_port_state	port_state[HNS_ROCE_MAX_PORTS];
};

struct hns_roce_ceqe {
	__le32	comp;
	__le32	rsv[15];
};

#define CEQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_ceqe, h, l)

#define CEQE_CQN CEQE_FIELD_LOC(23, 0)
#define CEQE_OWNER CEQE_FIELD_LOC(31, 31)

struct hns_roce_aeqe {
	__le32 asyn;
	union {
		struct {
			__le32 num;
			u32 rsv0;
			u32 rsv1;
		} queue_event;

		struct {
			__le64  out_param;
			__le16  token;
			u8	status;
			u8	rsv0;
		} __packed cmd;
	 } event;
	__le32 rsv[12];
};

#define AEQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_aeqe, h, l)

#define AEQE_EVENT_TYPE AEQE_FIELD_LOC(7, 0)
#define AEQE_SUB_TYPE AEQE_FIELD_LOC(15, 8)
#define AEQE_OWNER AEQE_FIELD_LOC(31, 31)
#define AEQE_EVENT_QUEUE_NUM AEQE_FIELD_LOC(55, 32)

struct hns_roce_eq {
	struct hns_roce_dev		*hr_dev;
	void __iomem			*db_reg;

	int				type_flag; /* AEQ: 1, CEQ: 0 */
	int				eqn;
	u32				entries;
	int				eqe_size;
	int				irq;
	u32				cons_index;
	int				over_ignore;
	int				coalesce;
	int				arm_st;
	int				hop_num;
	struct hns_roce_mtr		mtr;
	u16				eq_max_cnt;
	u32				eq_period;
	int				shift;
	int				event_type;
	int				sub_type;
};

struct hns_roce_eq_table {
	struct hns_roce_eq	*eq;
};

enum hns_roce_scc_algo {
	HNS_ROCE_SCC_ALGO_DCQCN = 0,
	HNS_ROCE_SCC_ALGO_LDCP,
	HNS_ROCE_SCC_ALGO_HC3,
	HNS_ROCE_SCC_ALGO_DIP,
	HNS_ROCE_SCC_ALGO_TOTAL,
};

enum congest_type {
	HNS_ROCE_CONGEST_TYPE_DCQCN = 1 << HNS_ROCE_SCC_ALGO_DCQCN,
	HNS_ROCE_CONGEST_TYPE_LDCP = 1 << HNS_ROCE_SCC_ALGO_LDCP,
	HNS_ROCE_CONGEST_TYPE_HC3 = 1 << HNS_ROCE_SCC_ALGO_HC3,
	HNS_ROCE_CONGEST_TYPE_DIP = 1 << HNS_ROCE_SCC_ALGO_DIP,
};
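
/*
 * Each congest_type value is the bit matching its hns_roce_scc_algo index,
 * so a capability mask of, e.g., (HNS_ROCE_CONGEST_TYPE_DCQCN |
 * HNS_ROCE_CONGEST_TYPE_DIP) = 0x9 advertises both DCQCN and DIP.
 */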

struct hns_roce_caps {
	u64		fw_ver;
	u8		num_ports;
	int		gid_table_len[HNS_ROCE_MAX_PORTS];
	int		pkey_table_len[HNS_ROCE_MAX_PORTS];
	int		local_ca_ack_delay;
	int		num_uars;
	u32		phy_num_uars;
	u32		max_sq_sg;
	u32		max_sq_inline;
	u32		max_rq_sg;
	u32		max_extend_sg;
	u32		num_qps;
	u32		num_pi_qps;
	u32		reserved_qps;
	int		num_qpc_timer;
	u32		num_srqs;
	u32		max_wqes;
	u32		max_srq_wrs;
	u32		max_srq_sges;
	u32		max_sq_desc_sz;
	u32		max_rq_desc_sz;
	u32		max_srq_desc_sz;
	int		max_qp_init_rdma;
	int		max_qp_dest_rdma;
	u32		num_cqs;
	u32		max_cqes;
	u32		min_cqes;
	u32		min_wqes;
	u32		reserved_cqs;
	u32		reserved_srqs;
	int		num_aeq_vectors;
	int		num_comp_vectors;
	int		num_other_vectors;
	u32		num_mtpts;
	u32		num_mtt_segs;
	u32		num_srqwqe_segs;
	u32		num_idx_segs;
	int		reserved_mrws;
	int		reserved_uars;
	int		num_pds;
	int		reserved_pds;
	u32		num_xrcds;
	u32		reserved_xrcds;
	u32		mtt_entry_sz;
	u32		cqe_sz;
	u32		page_size_cap;
	u32		reserved_lkey;
	int		mtpt_entry_sz;
	int		qpc_sz;
	int		irrl_entry_sz;
	int		trrl_entry_sz;
	int		cqc_entry_sz;
	int		sccc_sz;
	int		qpc_timer_entry_sz;
	int		cqc_timer_entry_sz;
	int		srqc_entry_sz;
	int		idx_entry_sz;
	u32		pbl_ba_pg_sz;
	u32		pbl_buf_pg_sz;
	u32		pbl_hop_num;
	int		aeqe_depth;
	int		ceqe_depth;
	u32		aeqe_size;
	u32		ceqe_size;
	enum ib_mtu	max_mtu;
	u32		qpc_bt_num;
	u32		qpc_timer_bt_num;
	u32		srqc_bt_num;
	u32		cqc_bt_num;
	u32		cqc_timer_bt_num;
	u32		mpt_bt_num;
	u32		eqc_bt_num;
	u32		smac_bt_num;
	u32		sgid_bt_num;
	u32		sccc_bt_num;
	u32		gmv_bt_num;
	u32		qpc_ba_pg_sz;
	u32		qpc_buf_pg_sz;
	u32		qpc_hop_num;
	u32		srqc_ba_pg_sz;
	u32		srqc_buf_pg_sz;
	u32		srqc_hop_num;
	u32		cqc_ba_pg_sz;
	u32		cqc_buf_pg_sz;
	u32		cqc_hop_num;
	u32		mpt_ba_pg_sz;
	u32		mpt_buf_pg_sz;
	u32		mpt_hop_num;
	u32		mtt_ba_pg_sz;
	u32		mtt_buf_pg_sz;
	u32		mtt_hop_num;
	u32		wqe_sq_hop_num;
	u32		wqe_sge_hop_num;
	u32		wqe_rq_hop_num;
	u32		sccc_ba_pg_sz;
	u32		sccc_buf_pg_sz;
	u32		sccc_hop_num;
	u32		qpc_timer_ba_pg_sz;
	u32		qpc_timer_buf_pg_sz;
	u32		qpc_timer_hop_num;
	u32		cqc_timer_ba_pg_sz;
	u32		cqc_timer_buf_pg_sz;
	u32		cqc_timer_hop_num;
	u32		cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */
	u32		cqe_buf_pg_sz;
	u32		cqe_hop_num;
	u32		srqwqe_ba_pg_sz;
	u32		srqwqe_buf_pg_sz;
	u32		srqwqe_hop_num;
	u32		idx_ba_pg_sz;
	u32		idx_buf_pg_sz;
	u32		idx_hop_num;
	u32		eqe_ba_pg_sz;
	u32		eqe_buf_pg_sz;
	u32		eqe_hop_num;
	u32		gmv_entry_num;
	u32		gmv_entry_sz;
	u32		gmv_ba_pg_sz;
	u32		gmv_buf_pg_sz;
	u32		gmv_hop_num;
	u32		sl_num;
	u32		llm_buf_pg_sz;
	u32		chunk_sz; /* chunk size in non multihop mode */
	u64		flags;
	u16		default_ceq_max_cnt;
	u16		default_ceq_period;
	u16		default_aeq_max_cnt;
	u16		default_aeq_period;
	u16		default_aeq_arm_st;
	u16		default_ceq_arm_st;
	u8		congest_type;
	u8		default_congest_type;
};

enum hns_roce_device_state {
	HNS_ROCE_DEVICE_STATE_INITED,
	HNS_ROCE_DEVICE_STATE_RST_DOWN,
	HNS_ROCE_DEVICE_STATE_UNINIT,
};

enum hns_roce_hw_pkt_stat_index {
	HNS_ROCE_HW_RX_RC_PKT_CNT,
	HNS_ROCE_HW_RX_UC_PKT_CNT,
	HNS_ROCE_HW_RX_UD_PKT_CNT,
	HNS_ROCE_HW_RX_XRC_PKT_CNT,
	HNS_ROCE_HW_RX_PKT_CNT,
	HNS_ROCE_HW_RX_ERR_PKT_CNT,
	HNS_ROCE_HW_RX_CNP_PKT_CNT,
	HNS_ROCE_HW_TX_RC_PKT_CNT,
	HNS_ROCE_HW_TX_UC_PKT_CNT,
	HNS_ROCE_HW_TX_UD_PKT_CNT,
	HNS_ROCE_HW_TX_XRC_PKT_CNT,
	HNS_ROCE_HW_TX_PKT_CNT,
	HNS_ROCE_HW_TX_ERR_PKT_CNT,
	HNS_ROCE_HW_TX_CNP_PKT_CNT,
	HNS_ROCE_HW_TRP_GET_MPT_ERR_PKT_CNT,
	HNS_ROCE_HW_TRP_GET_IRRL_ERR_PKT_CNT,
	HNS_ROCE_HW_ECN_DB_CNT,
	HNS_ROCE_HW_RX_BUF_CNT,
	HNS_ROCE_HW_TRP_RX_SOF_CNT,
	HNS_ROCE_HW_CQ_CQE_CNT,
	HNS_ROCE_HW_CQ_POE_CNT,
	HNS_ROCE_HW_CQ_NOTIFY_CNT,
	HNS_ROCE_HW_CNT_TOTAL,
};

enum hns_roce_hw_dfx_stat_index {
	HNS_ROCE_DFX_AEQE_CNT,
	HNS_ROCE_DFX_CEQE_CNT,
	HNS_ROCE_DFX_CMDS_CNT,
	HNS_ROCE_DFX_CMDS_ERR_CNT,
	HNS_ROCE_DFX_MBX_POSTED_CNT,
	HNS_ROCE_DFX_MBX_POLLED_CNT,
	HNS_ROCE_DFX_MBX_EVENT_CNT,
	HNS_ROCE_DFX_QP_CREATE_ERR_CNT,
	HNS_ROCE_DFX_QP_MODIFY_ERR_CNT,
	HNS_ROCE_DFX_CQ_CREATE_ERR_CNT,
	HNS_ROCE_DFX_SRQ_CREATE_ERR_CNT,
	HNS_ROCE_DFX_XRCD_ALLOC_ERR_CNT,
	HNS_ROCE_DFX_MR_REG_ERR_CNT,
	HNS_ROCE_DFX_MR_REREG_ERR_CNT,
	HNS_ROCE_DFX_AH_CREATE_ERR_CNT,
	HNS_ROCE_DFX_MMAP_ERR_CNT,
	HNS_ROCE_DFX_UCTX_ALLOC_ERR_CNT,
	HNS_ROCE_DFX_CNT_TOTAL
};

struct hns_roce_hw {
	int (*cmq_init)(struct hns_roce_dev *hr_dev);
	void (*cmq_exit)(struct hns_roce_dev *hr_dev);
	int (*hw_profile)(struct hns_roce_dev *hr_dev);
	int (*hw_init)(struct hns_roce_dev *hr_dev);
	void (*hw_exit)(struct hns_roce_dev *hr_dev);
	int (*post_mbox)(struct hns_roce_dev *hr_dev,
			 struct hns_roce_mbox_msg *mbox_msg);
	int (*poll_mbox_done)(struct hns_roce_dev *hr_dev);
	bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy);
	int (*set_gid)(struct hns_roce_dev *hr_dev, int gid_index,
		       const union ib_gid *gid, const struct ib_gid_attr *attr);
	int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port,
		       const u8 *addr);
	int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
			  struct hns_roce_mr *mr);
	int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
				struct hns_roce_mr *mr, int flags,
				void *mb_buf);
	int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
			       struct hns_roce_mr *mr);
	int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
	void (*write_cqc)(struct hns_roce_dev *hr_dev,
			  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
			  dma_addr_t dma_handle);
	int (*set_hem)(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, int obj, u32 step_idx);
	int (*clear_hem)(struct hns_roce_dev *hr_dev,
			 struct hns_roce_hem_table *table, int obj,
			 u32 step_idx);
	int (*set_dca_buf)(struct hns_roce_dev *hr_dev,
			   struct hns_roce_qp *hr_qp);
	bool (*chk_dca_buf_inactive)(struct hns_roce_dev *hr_dev,
				     struct hns_roce_qp *hr_qp);
	int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			 int attr_mask, enum ib_qp_state cur_state,
			 enum ib_qp_state new_state, struct ib_udata *udata);
	int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
			 struct hns_roce_qp *hr_qp);
	void (*dereg_mr)(struct hns_roce_dev *hr_dev);
	int (*init_eq)(struct hns_roce_dev *hr_dev);
	void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
	int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
	int (*query_cqc)(struct hns_roce_dev *hr_dev, u32 cqn, void *buffer);
	int (*query_qpc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
	int (*query_mpt)(struct hns_roce_dev *hr_dev, u32 key, void *buffer);
	int (*get_dscp)(struct hns_roce_dev *hr_dev, u8 dscp,
			u8 *tc_mode, u8 *priority);
	int (*query_hw_counter)(struct hns_roce_dev *hr_dev,
				u64 *stats, u32 port, int *hw_counters);
	const struct ib_device_ops *hns_roce_dev_ops;
	const struct ib_device_ops *hns_roce_dev_srq_ops;
	int (*bond_init)(struct hns_roce_dev *hr_dev);
	bool (*bond_is_active)(struct hns_roce_dev *hr_dev);
	struct net_device *(*get_bond_netdev)(struct hns_roce_dev *hr_dev);
	int (*config_scc_param)(struct hns_roce_dev *hr_dev, u8 port_num,
				enum hns_roce_scc_algo algo);
	int (*query_scc_param)(struct hns_roce_dev *hr_dev, u8 port_num,
			       enum hns_roce_scc_algo algo);
};

#define HNS_ROCE_SCC_PARAM_SIZE 4
struct hns_roce_scc_param {
	__le32 param[HNS_ROCE_SCC_PARAM_SIZE];
	u32 lifespan;
	unsigned long timestamp;
	enum hns_roce_scc_algo algo_type;
	struct delayed_work scc_cfg_dwork;
	struct hns_roce_dev *hr_dev;
	u8 port_num;
};

struct hns_roce_port {
	struct hns_roce_dev *hr_dev;
	u8 port_num;
	struct kobject kobj;
	struct hns_roce_scc_param *scc_param;
};

struct hns_roce_dev {
	struct ib_device	ib_dev;
	struct pci_dev		*pci_dev;
	struct device		*dev;
	void			*dbgfs; /* debugfs for this dev */

	struct list_head	uctx_list; /* list of all uctx on this dev */
	spinlock_t		uctx_list_lock; /* protect @uctx_list */

	struct hns_roce_uar     priv_uar;
	const char		*irq_names[HNS_ROCE_MAX_IRQ_NUM];
	spinlock_t		sm_lock;
	bool			active;
	bool			is_reset;
	bool			dis_db;
	unsigned long		reset_cnt;
	struct hns_roce_ib_iboe iboe;
	enum hns_roce_device_state state;
	struct list_head	qp_list; /* list of all qps on this dev */
	spinlock_t		qp_list_lock; /* protect qp_list */
	struct list_head	dip_list; /* list of all dest ips on this dev */
	spinlock_t		dip_list_lock; /* protect dip_list */

	struct list_head        pgdir_list;
	struct mutex            pgdir_mutex;
	int			irq[HNS_ROCE_MAX_IRQ_NUM];
	u8 __iomem		*reg_base;
	void __iomem		*mem_base;
	struct hns_roce_caps	caps;
	struct xarray		qp_table_xa;

	struct hns_roce_dca_ctx	dca_ctx;

	unsigned char	dev_addr[HNS_ROCE_MAX_PORTS][ETH_ALEN];
	u64			sys_image_guid;
	u32                     vendor_id;
	u32                     vendor_part_id;
	u32                     hw_rev;
	void __iomem            *priv_addr;

	struct hns_roce_cmdq	cmd;
	struct hns_roce_ida pd_ida;
	struct hns_roce_ida xrcd_ida;
	struct hns_roce_ida uar_ida;
	struct hns_roce_mr_table  mr_table;
	struct hns_roce_cq_table  cq_table;
	struct hns_roce_srq_table srq_table;
	struct hns_roce_qp_table  qp_table;
	struct hns_roce_eq_table  eq_table;
	struct hns_roce_hem_table  qpc_timer_table;
	struct hns_roce_hem_table  cqc_timer_table;
	/* GMV is the memory area that the driver allocates for the hardware
	 * to store SGID, SMAC and VLAN information.
	 */
	struct hns_roce_hem_table  gmv_table;

	int			cmd_mod;
	u8			mac_type;
	int			loop_idc;
	u32			sdb_offset;
	u32			odb_offset;
	struct page		*reset_page; /* store reset state */
	void			*reset_kaddr; /* addr of reset page */
	const struct hns_roce_hw *hw;
	void			*priv;
	struct workqueue_struct *irq_workq;
	struct work_struct ecc_work;
	u32 func_num;
	u32 is_vf;
	u32 congest_algo_tmpl_id;
	u64 dwqe_page;

	struct notifier_block bond_nb;
	struct netdev_lag_lower_state_info slave_state;
	struct hns_roce_port port_data[HNS_ROCE_MAX_PORTS];
	atomic64_t *dfx_cnt;
};

static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
	return container_of(ib_dev, struct hns_roce_dev, ib_dev);
}

static inline struct hns_roce_ucontext
			*to_hr_ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
}

static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct hns_roce_pd, ibpd);
}

static inline struct hns_roce_xrcd *to_hr_xrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct hns_roce_xrcd, ibxrcd);
}

static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct hns_roce_ah, ibah);
}

static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct hns_roce_mr, ibmr);
}

static inline struct hns_roce_mw *to_hr_mw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct hns_roce_mw, ibmw);
}

static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct hns_roce_qp, ibqp);
}

static inline struct hns_roce_cq *to_hr_cq(struct ib_cq *ib_cq)
{
	return container_of(ib_cq, struct hns_roce_cq, ib_cq);
}

static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}

static inline struct hns_user_mmap_entry *
to_hns_mmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry, struct hns_user_mmap_entry, rdma_entry);
}

static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
{
	writeq(*(u64 *)val, dest);
}
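
/*
 * Illustrative only: a doorbell is posted as one 64-bit MMIO store built
 * from two little-endian 32-bit words, e.g. (names are hypothetical)
 *
 *	__le32 db[2] = { cpu_to_le32(low), cpu_to_le32(high) };
 *
 *	hns_roce_write64_k(db, hr_dev->reg_base + DB_REG_OFFSET);
 */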

static inline struct hns_roce_qp
	*__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
{
	return xa_load(&hr_dev->qp_table_xa, qpn);
}

static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf,
					unsigned int offset)
{
	return (char *)(buf->trunk_list[offset >> buf->trunk_shift].buf) +
			(offset & ((1 << buf->trunk_shift) - 1));
}

static inline dma_addr_t hns_roce_buf_dma_addr(struct hns_roce_buf *buf,
					       unsigned int offset)
{
	return buf->trunk_list[offset >> buf->trunk_shift].map +
			(offset & ((1 << buf->trunk_shift) - 1));
}

static inline dma_addr_t hns_roce_buf_page(struct hns_roce_buf *buf, u32 idx)
{
	return hns_roce_buf_dma_addr(buf, idx << buf->page_shift);
}
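
/*
 * Worked example of the trunk arithmetic above (values illustrative):
 * with trunk_shift = 16 (64KB trunks), offset 0x1a000 resolves to
 * trunk_list[0x1a000 >> 16] = trunk_list[1], at byte
 * 0x1a000 & 0xffff = 0xa000 within that trunk.
 */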

#define hr_hw_page_align(x)		ALIGN(x, 1 << HNS_HW_PAGE_SHIFT)

static inline u64 to_hr_hw_page_addr(u64 addr)
{
	return addr >> HNS_HW_PAGE_SHIFT;
}

static inline u32 to_hr_hw_page_shift(u32 page_shift)
{
	return page_shift - HNS_HW_PAGE_SHIFT;
}

static inline u32 to_hr_hem_hopnum(u32 hopnum, u32 count)
{
	if (count > 0)
		return hopnum == HNS_ROCE_HOP_NUM_0 ? 0 : hopnum;

	return 0;
}

static inline u32 to_hr_hem_entries_size(u32 count, u32 buf_shift)
{
	return hr_hw_page_align(count << buf_shift);
}

static inline u32 to_hr_hem_entries_count(u32 count, u32 buf_shift)
{
	return hr_hw_page_align(count << buf_shift) >> buf_shift;
}

static inline u32 to_hr_hem_entries_shift(u32 count, u32 buf_shift)
{
	if (!count)
		return 0;

	return ilog2(to_hr_hem_entries_count(count, buf_shift));
}
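
/*
 * Worked example (illustrative): 24 entries of 64 bytes (buf_shift = 6)
 * give to_hr_hem_entries_size(24, 6) = ALIGN(1536, 4096) = 4096 bytes,
 * i.e. to_hr_hem_entries_count() = 4096 >> 6 = 64 entries after the 4KB
 * hardware-page round-up, and an entries shift of ilog2(64) = 6.
 */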

#define DSCP_SHIFT 2

static inline u8 get_tclass(const struct ib_global_route *grh)
{
	return grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP ?
	       grh->traffic_class >> DSCP_SHIFT : grh->traffic_class;
}
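
/*
 * For RoCEv2 (UDP-encapsulated) GIDs only the 6-bit DSCP field of the
 * traffic class is used, e.g. traffic_class 0xb8 >> DSCP_SHIFT = 0x2e
 * (DSCP 46, expedited forwarding).
 */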

static inline u8 to_rdma_port_num(u8 phy_port_num)
{
	return phy_port_num + 1;
}

static inline enum ib_port_state get_port_state(struct net_device *net_dev)
{
	return (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
		IB_PORT_ACTIVE : IB_PORT_DOWN;
}

void hns_roce_init_uar_table(struct hns_roce_dev *dev);
int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);

int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
			u64 out_param);
int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);

/* The hns roce hw needs the current and the next block addr from the mtt */
#define MTT_MIN_COUNT	 2
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		      u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			struct hns_roce_buf_attr *buf_attr,
			unsigned int page_shift, struct ib_udata *udata,
			unsigned long user_addr);
void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
			  struct hns_roce_mtr *mtr);
int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		     dma_addr_t *pages, unsigned int page_cnt);

void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);

void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);

void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);

int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		       struct ib_udata *udata);
int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
static inline int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags)
{
	return 0;
}

int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata);
int hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length,
			   u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
			   struct ib_udata *udata);
struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				u32 max_num_sg);
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		       unsigned int *sg_offset);
int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
unsigned long key_to_hw_index(u32 key);

int hns_roce_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int hns_roce_dealloc_mw(struct ib_mw *ibmw);

void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf);
struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
					u32 page_shift, u32 flags);

int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, struct hns_roce_buf *buf,
			   unsigned int page_shift);
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, struct ib_umem *umem,
			   unsigned int page_shift);

int hns_roce_create_srq(struct ib_srq *srq,
			struct ib_srq_init_attr *srq_init_attr,
			struct ib_udata *udata);
int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
			enum ib_srq_attr_mask srq_attr_mask,
			struct ib_udata *udata);
int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);

int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);

struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata);
void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
			  struct ib_cq *ib_cq);
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
		       struct hns_roce_cq *recv_cq);
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq);
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			 struct ib_udata *udata);
__be32 send_ieth(const struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);

int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
		       struct ib_udata *udata);

int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
			 struct hns_roce_db *db);
void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
			    struct hns_roce_db *db);
int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
		      int order);
void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);

void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq);
int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq);
int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp);
int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp);
int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr);
int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr);
struct hns_user_mmap_entry *
hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
				size_t length,
				enum hns_roce_mmap_type mmap_type);
int hns_roce_create_port_files(struct ib_device *ibdev, u8 port_num,
			       struct kobject *kobj);
void hns_roce_unregister_sysfs(struct hns_roce_dev *hr_dev);
#endif /* _HNS_ROCE_DEVICE_H */