/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"

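/*
 * Mirror the netdev MAC of @port into dev_addr[] and program it on the
 * matching physical port. On HIP09 and later revisions the MAC is not
 * managed here, hence the early return.
 */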
static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port,
			    const u8 *addr)
{
	u8 phy_port;
	u32 i;

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		return 0;

	if (!memcmp(hr_dev->dev_addr[port], addr, ETH_ALEN))
		return 0;

	for (i = 0; i < ETH_ALEN; i++)
		hr_dev->dev_addr[port][i] = addr[i];

	phy_port = hr_dev->iboe.phy_port[port];
	return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}

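/* Install a GID table entry in hardware when the RDMA core adds one. */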
static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
	u8 port = attr->port_num - 1;
	int ret;

	if (port >= hr_dev->caps.num_ports)
		return -EINVAL;

	ret = hr_dev->hw->set_gid(hr_dev, attr->index, &attr->gid, attr);

	return ret;
}

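/* Clear a GID table entry in hardware when the RDMA core removes one. */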
static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
	u8 port = attr->port_num - 1;
	int ret;

	if (port >= hr_dev->caps.num_ports)
		return -EINVAL;

	ret = hr_dev->hw->set_gid(hr_dev, attr->index, NULL, NULL);

	return ret;
}

static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
			   unsigned long dev_event)
{
	struct device *dev = hr_dev->dev;
	enum ib_port_state port_state;
	struct net_device *netdev;
	struct ib_event event;
	unsigned long flags;
	int ret = 0;

	netdev = hr_dev->iboe.netdevs[port];
	if (!netdev) {
		dev_err(dev, "Can't find netdev on port(%u)!\n", port);
		return -ENODEV;
	}

	switch (dev_event) {
	case NETDEV_REGISTER:
	case NETDEV_CHANGEADDR:
		ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
		break;
	case NETDEV_UP:
	case NETDEV_CHANGE:
		ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
		if (ret)
			return ret;
		fallthrough;
	case NETDEV_DOWN:
		port_state = get_port_state(netdev);

		spin_lock_irqsave(&hr_dev->iboe.lock, flags);
		if (hr_dev->iboe.port_state[port] == port_state) {
			spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
			return NOTIFY_DONE;
		}
		hr_dev->iboe.port_state[port] = port_state;
		spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

		event.device = &hr_dev->ib_dev;
		event.event = (port_state == IB_PORT_ACTIVE) ?
			      IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
		event.element.port_num = to_rdma_port_num(port);
		ib_dispatch_event(&event);
		break;
	case NETDEV_UNREGISTER:
		break;
	default:
		dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(dev_event));
		break;
	}

	return ret;
}

static int hns_roce_netdev_event(struct notifier_block *self,
				 unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct hns_roce_ib_iboe *iboe = NULL;
	struct hns_roce_dev *hr_dev = NULL;
	int ret;
	u8 port;

	hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
	iboe = &hr_dev->iboe;

	for (port = 0; port < hr_dev->caps.num_ports; port++) {
		if (dev == iboe->netdevs[port]) {
			ret = handle_en_event(hr_dev, port, event);
			if (ret)
				return NOTIFY_DONE;
			break;
		}
	}

	return NOTIFY_DONE;
}

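/* Mark every port down and program its initial MAC from the bound netdev. */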
static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
{
	int ret;
	u8 i;

	for (i = 0; i < hr_dev->caps.num_ports; i++) {
		hr_dev->iboe.port_state[i] = IB_PORT_DOWN;

		ret = hns_roce_set_mac(hr_dev, i,
				       hr_dev->iboe.netdevs[i]->dev_addr);
		if (ret)
			return ret;
	}

	return 0;
}

static int hns_roce_query_device(struct ib_device *ib_dev,
				 struct ib_device_attr *props,
				 struct ib_udata *uhw)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

	memset(props, 0, sizeof(*props));

	props->fw_ver = hr_dev->caps.fw_ver;
	props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
	props->max_mr_size = (u64)(~(0ULL));
	props->page_size_cap = hr_dev->caps.page_size_cap;
	props->vendor_id = hr_dev->vendor_id;
	props->vendor_part_id = hr_dev->vendor_part_id;
	props->hw_ver = hr_dev->hw_rev;
	props->max_qp = hr_dev->caps.num_qps;
	props->max_qp_wr = hr_dev->caps.max_wqes;
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
				  IB_DEVICE_RC_RNR_NAK_GEN;
	props->max_send_sge = hr_dev->caps.max_sq_sg;
	props->max_recv_sge = hr_dev->caps.max_rq_sg;
	props->max_sge_rd = 1;
	props->max_cq = hr_dev->caps.num_cqs;
	props->max_cqe = hr_dev->caps.max_cqes;
	props->max_mr = hr_dev->caps.num_mtpts;
	props->max_pd = hr_dev->caps.num_pds;
	props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
	props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ?
			    IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->max_pkeys = 1;
	props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		props->max_srq = hr_dev->caps.num_srqs;
		props->max_srq_wr = hr_dev->caps.max_srq_wrs;
		props->max_srq_sge = hr_dev->caps.max_srq_sges;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR &&
	    hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
		props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;

	return 0;
}

static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
			       struct ib_port_attr *props)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
	struct device *dev = hr_dev->dev;
	struct net_device *net_dev;
	unsigned long flags;
	enum ib_mtu mtu;
	u8 port;

	port = port_num - 1;

	/* props being zeroed by the caller, avoid zeroing it here */

	props->max_mtu = hr_dev->caps.max_mtu;
	props->gid_tbl_len = hr_dev->caps.gid_table_len[port];
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				IB_PORT_VENDOR_CLASS_SUP |
				IB_PORT_BOOT_MGMT_SUP;
	props->max_msg_sz = HNS_ROCE_MAX_MSG_LEN;
	props->pkey_tbl_len = 1;
	props->active_width = IB_WIDTH_4X;
	props->active_speed = 1;

	spin_lock_irqsave(&hr_dev->iboe.lock, flags);

	net_dev = hr_dev->iboe.netdevs[port];
	if (!net_dev) {
		spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
		dev_err(dev, "Can't find netdev on port(%u)!\n", port);
		return -EINVAL;
	}

	mtu = iboe_get_mtu(net_dev->mtu);
	props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
	props->state = get_port_state(net_dev);
	props->phys_state = props->state == IB_PORT_ACTIVE ?
				    IB_PORT_PHYS_STATE_LINK_UP :
				    IB_PORT_PHYS_STATE_DISABLED;

	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

	return 0;
}

static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
						    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index,
			       u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = PKEY_ID;

	return 0;
}

static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
				  struct ib_device_modify *props)
{
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		spin_lock_irqsave(&to_hr_dev(ib_dev)->sm_lock, flags);
		memcpy(ib_dev->node_desc, props->node_desc, NODE_DESC_SIZE);
		spin_unlock_irqrestore(&to_hr_dev(ib_dev)->sm_lock, flags);
	}

	return 0;
}

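/*
 * Allocate and insert an rdma_user_mmap entry for @address. Doorbell
 * entries are pinned to pgoff 0 for ABI compatibility; DWQE entries may
 * land at any offset above it.
 */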
struct hns_user_mmap_entry *
hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
				size_t length,
				enum hns_roce_mmap_type mmap_type)
{
	struct hns_user_mmap_entry *entry;
	int ret;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->address = address;
	entry->mmap_type = mmap_type;

	switch (mmap_type) {
	/* pgoff 0 must be used by DB for compatibility */
	case HNS_ROCE_MMAP_TYPE_DB:
		ret = rdma_user_mmap_entry_insert_exact(
				ucontext, &entry->rdma_entry, length, 0);
		break;
	case HNS_ROCE_MMAP_TYPE_DWQE:
		ret = rdma_user_mmap_entry_insert_range(
				ucontext, &entry->rdma_entry, length, 1,
				U32_MAX);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret) {
		kfree(entry);
		return NULL;
	}

	return entry;
}

static void hns_roce_dealloc_uar_entry(struct hns_roce_ucontext *context)
{
	if (context->db_mmap_entry)
		rdma_user_mmap_entry_remove(
			&context->db_mmap_entry->rdma_entry);
}

static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
{
	struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
	u64 address;

	address = context->uar.pfn << PAGE_SHIFT;
	context->db_mmap_entry = hns_roce_user_mmap_entry_insert(
		uctx, address, PAGE_SIZE, HNS_ROCE_MMAP_TYPE_DB);
	if (!context->db_mmap_entry)
		return -ENOMEM;

	return 0;
}

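/*
 * Create a user context: negotiate optional features (extended SGE,
 * inline RQ/CQE) with userspace through ucmd/resp, then allocate a UAR
 * and its doorbell mmap entry.
 */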
static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
				   struct ib_udata *udata)
{
	struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
	struct hns_roce_ib_alloc_ucontext_resp resp = {};
	struct hns_roce_ib_alloc_ucontext ucmd = {};
	int ret;

	if (!hr_dev->active)
		return -EAGAIN;

	resp.qp_tab_size = hr_dev->caps.num_qps;
	resp.srq_tab_size = hr_dev->caps.num_srqs;

	ret = ib_copy_from_udata(&ucmd, udata,
				 min(udata->inlen, sizeof(ucmd)));
	if (ret)
		return ret;

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		context->config = ucmd.config & HNS_ROCE_EXSGE_FLAGS;

	if (context->config & HNS_ROCE_EXSGE_FLAGS) {
		resp.config |= HNS_ROCE_RSP_EXSGE_FLAGS;
		resp.max_inline_data = hr_dev->caps.max_sq_inline;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
		context->config |= ucmd.config & HNS_ROCE_RQ_INLINE_FLAGS;
		resp.config |= HNS_ROCE_RSP_RQ_INLINE_FLAGS;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQE_INLINE) {
		context->config |= ucmd.config & HNS_ROCE_CQE_INLINE_FLAGS;
		resp.config |= HNS_ROCE_RSP_CQE_INLINE_FLAGS;
	}

	ret = hns_roce_uar_alloc(hr_dev, &context->uar);
	if (ret)
		goto error_fail_uar_alloc;

	ret = hns_roce_alloc_uar_entry(uctx);
	if (ret)
		goto error_fail_uar_entry;

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
	    hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
		INIT_LIST_HEAD(&context->page_list);
		mutex_init(&context->page_mutex);
	}

	resp.cqe_size = hr_dev->caps.cqe_sz;

	ret = ib_copy_to_udata(udata, &resp,
			       min(udata->outlen, sizeof(resp)));
	if (ret)
		goto error_fail_copy_to_udata;

	return 0;

error_fail_copy_to_udata:
	hns_roce_dealloc_uar_entry(context);

error_fail_uar_entry:
	ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);

error_fail_uar_alloc:
	return ret;
}

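/* Tear down a user context: drop the doorbell mmap entry and free the UAR. */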
static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);

	hns_roce_dealloc_uar_entry(context);

	ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
}

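/*
 * mmap() handler: look the entry up by pgoff and map the doorbell or
 * DWQE region into userspace with device (uncached) attributes.
 */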
static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *rdma_entry;
	struct hns_user_mmap_entry *entry;
	phys_addr_t pfn;
	pgprot_t prot;
	int ret;

	rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff);
	if (!rdma_entry)
		return -EINVAL;

	entry = to_hns_mmap(rdma_entry);
	pfn = entry->address >> PAGE_SHIFT;

	switch (entry->mmap_type) {
	case HNS_ROCE_MMAP_TYPE_DB:
	case HNS_ROCE_MMAP_TYPE_DWQE:
		prot = pgprot_device(vma->vm_page_prot);
		break;
	default:
		return -EINVAL;
	}

	ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
				prot, rdma_entry);

	rdma_user_mmap_entry_put(rdma_entry);

	return ret;
}

static void hns_roce_free_mmap(struct rdma_user_mmap_entry *rdma_entry)
{
	struct hns_user_mmap_entry *entry = to_hns_mmap(rdma_entry);

	kfree(entry);
}

static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
				   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int ret;

	ret = ib_query_port(ib_dev, port_num, &attr);
	if (ret)
		return ret;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	if (to_hr_dev(ib_dev)->mac_type == HNAE3_MAC_ROH)
		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	else if (to_hr_dev(ib_dev)->caps.flags & HNS_ROCE_CAP_FLAG_ROCE_V1_V2)
		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
					    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	else
		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;

	return 0;
}

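/* Nothing to do here; the RDMA core zaps user mappings by itself. */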
static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

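/* Render the 64-bit firmware version as "major.minor.subminor". */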
static void hns_roce_get_fw_ver(struct ib_device *device, char *str)
{
	u64 fw_ver = to_hr_dev(device)->caps.fw_ver;
	unsigned int major, minor, sub_minor;

	major = upper_32_bits(fw_ver);
	minor = high_16_bits(lower_32_bits(fw_ver));
	sub_minor = low_16_bits(fw_ver);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%04u", major, minor,
		 sub_minor);
}

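/* Mark the device inactive, then detach the netdev notifier and unregister. */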
static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;

	hr_dev->active = false;
	unregister_netdevice_notifier(&iboe->nb);
	ib_unregister_device(&hr_dev->ib_dev);
}

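/* Verbs ops common to every hns hardware generation. */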
static const struct ib_device_ops hns_roce_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_HNS,
	.uverbs_abi_ver = 1,
	.uverbs_no_driver_id_binding = 1,

	.get_dev_fw_str = hns_roce_get_fw_ver,
	.add_gid = hns_roce_add_gid,
	.alloc_pd = hns_roce_alloc_pd,
	.alloc_ucontext = hns_roce_alloc_ucontext,
	.create_ah = hns_roce_create_ah,
	.create_cq = hns_roce_create_cq,
	.create_qp = hns_roce_create_qp,
	.dealloc_pd = hns_roce_dealloc_pd,
	.dealloc_ucontext = hns_roce_dealloc_ucontext,
	.del_gid = hns_roce_del_gid,
	.dereg_mr = hns_roce_dereg_mr,
	.destroy_ah = hns_roce_destroy_ah,
	.destroy_cq = hns_roce_destroy_cq,
	.disassociate_ucontext = hns_roce_disassociate_ucontext,
	.get_dma_mr = hns_roce_get_dma_mr,
	.get_link_layer = hns_roce_get_link_layer,
	.get_port_immutable = hns_roce_port_immutable,
	.mmap = hns_roce_mmap,
	.mmap_free = hns_roce_free_mmap,
	.modify_device = hns_roce_modify_device,
	.modify_qp = hns_roce_modify_qp,
	.query_ah = hns_roce_query_ah,
	.query_device = hns_roce_query_device,
	.query_pkey = hns_roce_query_pkey,
	.query_port = hns_roce_query_port,
	.reg_user_mr = hns_roce_reg_user_mr,

	INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
	INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
};

static const struct ib_device_ops hns_roce_dev_mr_ops = {
	.rereg_user_mr = hns_roce_rereg_user_mr,
};

static const struct ib_device_ops hns_roce_dev_mw_ops = {
	.alloc_mw = hns_roce_alloc_mw,
	.dealloc_mw = hns_roce_dealloc_mw,

	INIT_RDMA_OBJ_SIZE(ib_mw, hns_roce_mw, ibmw),
};

static const struct ib_device_ops hns_roce_dev_frmr_ops = {
	.alloc_mr = hns_roce_alloc_mr,
	.map_mr_sg = hns_roce_map_mr_sg,
};

static const struct ib_device_ops hns_roce_dev_srq_ops = {
	.create_srq = hns_roce_create_srq,
	.destroy_srq = hns_roce_destroy_srq,

	INIT_RDMA_OBJ_SIZE(ib_srq, hns_roce_srq, ibsrq),
};

static const struct ib_device_ops hns_roce_dev_xrcd_ops = {
	.alloc_xrcd = hns_roce_alloc_xrcd,
	.dealloc_xrcd = hns_roce_dealloc_xrcd,

	INIT_RDMA_OBJ_SIZE(ib_xrcd, hns_roce_xrcd, ibxrcd),
};

static const struct ib_device_ops hns_roce_dev_restrack_ops = {
	.fill_res_cq_entry = hns_roce_fill_res_cq_entry,
	.fill_res_cq_entry_raw = hns_roce_fill_res_cq_entry_raw,
	.fill_res_qp_entry = hns_roce_fill_res_qp_entry,
	.fill_res_qp_entry_raw = hns_roce_fill_res_qp_entry_raw,
	.fill_res_mr_entry = hns_roce_fill_res_mr_entry,
	.fill_res_mr_entry_raw = hns_roce_fill_res_mr_entry_raw,
};

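/*
 * Populate the ib_device, advertise the supported uverbs commands plus
 * optional op sets (MR rereg, MW, FRMR, SRQ, XRC), bind each port's
 * netdev, and register with the RDMA core before enabling netdev
 * notifications.
 */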
static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct hns_roce_ib_iboe *iboe = NULL;
	struct ib_device *ib_dev = NULL;
	struct device *dev = hr_dev->dev;
	unsigned int i;

	iboe = &hr_dev->iboe;
	spin_lock_init(&iboe->lock);

	ib_dev = &hr_dev->ib_dev;

	ib_dev->node_type = RDMA_NODE_IB_CA;
	ib_dev->dev.parent = dev;

	ib_dev->phys_port_cnt = hr_dev->caps.num_ports;
	ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
	ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;
	ib_dev->uverbs_cmd_mask =
		(1ULL << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ULL << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ULL << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ULL << IB_USER_VERBS_CMD_REG_MR) |
		(1ULL << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ULL << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ULL << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ULL << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ULL << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ULL << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ULL << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ULL << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ULL << IB_USER_VERBS_CMD_DESTROY_AH);

	ib_dev->uverbs_ex_cmd_mask |=
				(1ULL << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
				(1ULL << IB_USER_VERBS_EX_CMD_MODIFY_CQ) |
				(1ULL << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
				(1ULL << IB_USER_VERBS_EX_CMD_MODIFY_QP) |
				(1ULL << IB_USER_VERBS_EX_CMD_CREATE_QP);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR) {
		ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
		ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);
	}

	/* MW */
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW) {
		ib_dev->uverbs_cmd_mask |=
					(1ULL << IB_USER_VERBS_CMD_ALLOC_MW) |
					(1ULL << IB_USER_VERBS_CMD_DEALLOC_MW);
		ib_set_device_ops(ib_dev, &hns_roce_dev_mw_ops);
	}

	/* FRMR */
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
		ib_set_device_ops(ib_dev, &hns_roce_dev_frmr_ops);

	/* SRQ */
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ib_dev->uverbs_cmd_mask |=
				(1ULL << IB_USER_VERBS_CMD_CREATE_SRQ) |
				(1ULL << IB_USER_VERBS_CMD_MODIFY_SRQ) |
				(1ULL << IB_USER_VERBS_CMD_QUERY_SRQ) |
				(1ULL << IB_USER_VERBS_CMD_DESTROY_SRQ) |
				(1ULL << IB_USER_VERBS_CMD_POST_SRQ_RECV);
		ib_set_device_ops(ib_dev, &hns_roce_dev_srq_ops);
		ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_srq_ops);
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) {
		ib_dev->uverbs_cmd_mask |=
				(1ULL << IB_USER_VERBS_CMD_OPEN_XRCD) |
				(1ULL << IB_USER_VERBS_CMD_CLOSE_XRCD) |
				(1ULL << IB_USER_VERBS_CMD_CREATE_XSRQ);
		ib_set_device_ops(ib_dev, &hns_roce_dev_xrcd_ops);
	}

	ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
	ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
	ib_set_device_ops(ib_dev, &hns_roce_dev_restrack_ops);
	for (i = 0; i < hr_dev->caps.num_ports; i++) {
		if (!hr_dev->iboe.netdevs[i])
			continue;

		ret = ib_device_set_netdev(ib_dev, hr_dev->iboe.netdevs[i],
					   i + 1);
		if (ret)
			return ret;
	}
	dma_set_max_seg_size(dev, UINT_MAX);
	ret = ib_register_device(ib_dev, "hns_%d", dev);
	if (ret) {
		dev_err(dev, "ib_register_device failed!\n");
		return ret;
	}

	ret = hns_roce_setup_mtu_mac(hr_dev);
	if (ret) {
		dev_err(dev, "setup_mtu_mac failed!\n");
		goto error_failed_setup_mtu_mac;
	}

	iboe->nb.notifier_call = hns_roce_netdev_event;
	ret = register_netdevice_notifier(&iboe->nb);
	if (ret) {
		dev_err(dev, "register_netdevice_notifier failed!\n");
		goto error_failed_setup_mtu_mac;
	}

	hr_dev->active = true;
	return 0;

error_failed_setup_mtu_mac:
	ib_unregister_device(ib_dev);

	return ret;
}

static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
				      HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
				      hr_dev->caps.num_mtpts, 1);
	if (ret) {
		dev_err(dev, "Failed to init MTPT context memory, aborting.\n");
		return ret;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
				      HEM_TYPE_QPC, hr_dev->caps.qpc_sz,
				      hr_dev->caps.num_qps, 1);
	if (ret) {
		dev_err(dev, "Failed to init QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.irrl_table,
				      HEM_TYPE_IRRL,
				      hr_dev->caps.irrl_entry_sz *
				      hr_dev->caps.max_qp_init_rdma,
				      hr_dev->caps.num_qps, 1);
	if (ret) {
		dev_err(dev, "Failed to init irrl_table memory, aborting.\n");
		goto err_unmap_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->qp_table.trrl_table,
					      HEM_TYPE_TRRL,
					      hr_dev->caps.trrl_entry_sz *
					      hr_dev->caps.max_qp_dest_rdma,
					      hr_dev->caps.num_qps, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init trrl_table memory, aborting.\n");
			goto err_unmap_irrl;
		}
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
				      HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
				      hr_dev->caps.num_cqs, 1);
	if (ret) {
		dev_err(dev, "Failed to init CQ context memory, aborting.\n");
		goto err_unmap_trrl;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table,
					      HEM_TYPE_SRQC,
					      hr_dev->caps.srqc_entry_sz,
					      hr_dev->caps.num_srqs, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init SRQ context memory, aborting.\n");
			goto err_unmap_cq;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->qp_table.sccc_table,
					      HEM_TYPE_SCCC,
					      hr_dev->caps.sccc_sz,
					      hr_dev->caps.num_qps, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init SCC context memory, aborting.\n");
			goto err_unmap_srq;
		}
	}

	if (hr_dev->caps.qpc_timer_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
					      HEM_TYPE_QPC_TIMER,
					      hr_dev->caps.qpc_timer_entry_sz,
					      hr_dev->caps.num_qpc_timer, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init QPC timer memory, aborting.\n");
			goto err_unmap_ctx;
		}
	}

	if (hr_dev->caps.cqc_timer_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table,
					      HEM_TYPE_CQC_TIMER,
					      hr_dev->caps.cqc_timer_entry_sz,
					      hr_dev->caps.cqc_timer_bt_num, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init CQC timer memory, aborting.\n");
			goto err_unmap_qpc_timer;
		}
	}

	if (hr_dev->caps.gmv_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->gmv_table,
					      HEM_TYPE_GMV,
					      hr_dev->caps.gmv_entry_sz,
					      hr_dev->caps.gmv_entry_num, 1);
		if (ret) {
			dev_err(dev,
				"failed to init gmv table memory, ret = %d\n",
				ret);
			goto err_unmap_cqc_timer;
		}
	}

	return 0;

err_unmap_cqc_timer:
	if (hr_dev->caps.cqc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cqc_timer_table);

err_unmap_qpc_timer:
	if (hr_dev->caps.qpc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qpc_timer_table);

err_unmap_ctx:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.sccc_table);
err_unmap_srq:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table);

err_unmap_cq:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);

err_unmap_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.trrl_table);

err_unmap_irrl:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);

err_unmap_qp:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);

err_unmap_dmpt:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);

	return ret;
}

/**
 * hns_roce_setup_hca - setup host channel adapter
 * @hr_dev: pointer to hns roce device
 * Return: 0 on success, or a negative error code on failure.
 */
static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	spin_lock_init(&hr_dev->sm_lock);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
	    hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
		INIT_LIST_HEAD(&hr_dev->pgdir_list);
		mutex_init(&hr_dev->pgdir_mutex);
	}

	hns_roce_init_uar_table(hr_dev);

	ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
	if (ret) {
		dev_err(dev, "Failed to allocate priv_uar.\n");
		goto err_uar_table_free;
	}

	ret = hns_roce_init_qp_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init qp_table.\n");
		goto err_uar_table_free;
	}

	hns_roce_init_pd_table(hr_dev);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
		hns_roce_init_xrcd_table(hr_dev);

	hns_roce_init_mr_table(hr_dev);

	hns_roce_init_cq_table(hr_dev);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		hns_roce_init_srq_table(hr_dev);
	}

	return 0;

err_uar_table_free:
	ida_destroy(&hr_dev->uar_ida.ida);
	return ret;
}

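/*
 * Queue @cq on @cq_list the first time it is seen, so that
 * hns_roce_handle_device_err() completes every armed CQ exactly once.
 */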
static void check_and_get_armed_cq(struct list_head *cq_list, struct ib_cq *cq)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(cq);
	unsigned long flags;

	spin_lock_irqsave(&hr_cq->lock, flags);
	if (cq->comp_handler) {
		if (!hr_cq->is_armed) {
			hr_cq->is_armed = 1;
			list_add_tail(&hr_cq->node, cq_list);
		}
	}
	spin_unlock_irqrestore(&hr_cq->lock, flags);
}

void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp *hr_qp;
	struct hns_roce_cq *hr_cq;
	struct list_head cq_list;
	unsigned long flags_qp;
	unsigned long flags;

	INIT_LIST_HEAD(&cq_list);

	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
	list_for_each_entry(hr_qp, &hr_dev->qp_list, node) {
		spin_lock_irqsave(&hr_qp->sq.lock, flags_qp);
		if (hr_qp->sq.tail != hr_qp->sq.head)
			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.send_cq);
		spin_unlock_irqrestore(&hr_qp->sq.lock, flags_qp);

		spin_lock_irqsave(&hr_qp->rq.lock, flags_qp);
		if ((!hr_qp->ibqp.srq) && (hr_qp->rq.tail != hr_qp->rq.head))
			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.recv_cq);
		spin_unlock_irqrestore(&hr_qp->rq.lock, flags_qp);
	}

	list_for_each_entry(hr_cq, &cq_list, node)
		hns_roce_cq_completion(hr_dev, hr_cq->cqn);

	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}

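/*
 * Bring-up order: command queue, engine profile, mailbox commands, EQs,
 * HEM tables, software tables, hardware engine, then IB registration;
 * the error path unwinds in reverse.
 */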
int hns_roce_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	hr_dev->is_reset = false;

	if (hr_dev->hw->cmq_init) {
		ret = hr_dev->hw->cmq_init(hr_dev);
		if (ret) {
			dev_err(dev, "Init RoCE Command Queue failed!\n");
			return ret;
		}
	}

	ret = hr_dev->hw->hw_profile(hr_dev);
	if (ret) {
		dev_err(dev, "Get RoCE engine profile failed!\n");
		goto error_failed_cmd_init;
	}

	ret = hns_roce_cmd_init(hr_dev);
	if (ret) {
		dev_err(dev, "cmd init failed!\n");
		goto error_failed_cmd_init;
	}

	/* EQs are created while commands poll; event mode then relies on them */
	ret = hr_dev->hw->init_eq(hr_dev);
	if (ret) {
		dev_err(dev, "eq init failed!\n");
		goto error_failed_eq_table;
	}

	if (hr_dev->cmd_mod) {
		ret = hns_roce_cmd_use_events(hr_dev);
		if (ret)
			dev_warn(dev,
				 "Cmd event mode failed, set back to poll!\n");
	}

	ret = hns_roce_init_hem(hr_dev);
	if (ret) {
		dev_err(dev, "init HEM(Hardware Entry Memory) failed!\n");
		goto error_failed_init_hem;
	}

	ret = hns_roce_setup_hca(hr_dev);
	if (ret) {
		dev_err(dev, "setup hca failed!\n");
		goto error_failed_setup_hca;
	}

	if (hr_dev->hw->hw_init) {
		ret = hr_dev->hw->hw_init(hr_dev);
		if (ret) {
			dev_err(dev, "hw_init failed!\n");
			goto error_failed_engine_init;
		}
	}

	INIT_LIST_HEAD(&hr_dev->qp_list);
	spin_lock_init(&hr_dev->qp_list_lock);
	INIT_LIST_HEAD(&hr_dev->dip_list);
	spin_lock_init(&hr_dev->dip_list_lock);

	ret = hns_roce_register_device(hr_dev);
	if (ret)
		goto error_failed_register_device;

	return 0;

error_failed_register_device:
	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);

error_failed_engine_init:
	hns_roce_cleanup_bitmap(hr_dev);

error_failed_setup_hca:
	hns_roce_cleanup_hem(hr_dev);

error_failed_init_hem:
	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);
	hr_dev->hw->cleanup_eq(hr_dev);

error_failed_eq_table:
	hns_roce_cmd_cleanup(hr_dev);

error_failed_cmd_init:
	if (hr_dev->hw->cmq_exit)
		hr_dev->hw->cmq_exit(hr_dev);

	return ret;
}

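/* Tear everything down in the reverse order of hns_roce_init(). */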
void hns_roce_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_unregister_device(hr_dev);

	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);
	hns_roce_cleanup_bitmap(hr_dev);
	hns_roce_cleanup_hem(hr_dev);

	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);

	hr_dev->hw->cleanup_eq(hr_dev);
	hns_roce_cmd_cleanup(hr_dev);
	if (hr_dev->hw->cmq_exit)
		hr_dev->hw->cmq_exit(hr_dev);
}

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_DESCRIPTION("HNS RoCE Driver");