/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/acpi.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include <rdma/hns-abi.h>
#include "hns_roce_hem.h"

/**
 * hns_get_gid_index - Get gid index.
 * @hr_dev: pointer to structure hns_roce_dev.
 * @port: port, value range: 0 ~ MAX
 * @gid_index: gid_index, value range: 0 ~ MAX
 * Description:
 *    N ports share the gid table; entries are interleaved as follows:
 *		GID[0][0], GID[1][0],.....GID[N - 1][0],
 *		GID[0][1], GID[1][1],.....GID[N - 1][1],
 *		and so on.
 */
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
{
	return gid_index * hr_dev->caps.num_ports + port;
}
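
/*
 * For example, with caps.num_ports == 2 the table interleaves as:
 * index 0 -> GID[0][0], index 1 -> GID[1][0], index 2 -> GID[0][1],
 * index 3 -> GID[1][1]; hns_get_gid_index(hr_dev, 1, 2) thus yields
 * 2 * 2 + 1 = 5.
 */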

static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
{
	u8 phy_port;
	u32 i = 0;

	if (!memcmp(hr_dev->dev_addr[port], addr, ETH_ALEN))
		return 0;

	for (i = 0; i < ETH_ALEN; i++)
		hr_dev->dev_addr[port][i] = addr[i];

	phy_port = hr_dev->iboe.phy_port[port];
	return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}

static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
	u8 port = attr->port_num - 1;
	int ret;

	if (port >= hr_dev->caps.num_ports)
		return -EINVAL;

	ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr);

	return ret;
}

static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
	struct ib_gid_attr zattr = {};
	u8 port = attr->port_num - 1;
	int ret;

	if (port >= hr_dev->caps.num_ports)
		return -EINVAL;

	ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &zgid, &zattr);

	return ret;
}

static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
			   unsigned long event)
{
	struct device *dev = hr_dev->dev;
	struct net_device *netdev;
	int ret = 0;

	netdev = hr_dev->iboe.netdevs[port];
	if (!netdev) {
		dev_err(dev, "Can't find netdev on port(%u)!\n", port);
		return -ENODEV;
	}

	switch (event) {
	case NETDEV_UP:
	case NETDEV_CHANGE:
	case NETDEV_REGISTER:
	case NETDEV_CHANGEADDR:
		ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
		break;
	case NETDEV_DOWN:
		/*
		 * The v1 engine only supports taking all ports down
		 * together, so there is nothing to do per port here.
		 */
		break;
	default:
		dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event));
		break;
	}

	return ret;
}

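/*
 * Netdev notifier callback: match the notifying netdev to one of this
 * device's RoCE ports and let handle_en_event() react to the change.
 */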
static int hns_roce_netdev_event(struct notifier_block *self,
				 unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct hns_roce_ib_iboe *iboe = NULL;
	struct hns_roce_dev *hr_dev = NULL;
	int ret;
	u8 port;

	hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
	iboe = &hr_dev->iboe;

	for (port = 0; port < hr_dev->caps.num_ports; port++) {
		if (dev == iboe->netdevs[port]) {
			ret = handle_en_event(hr_dev, port, event);
			if (ret)
				return NOTIFY_DONE;
			break;
		}
	}

	return NOTIFY_DONE;
}

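/* Program each port's MTU and MAC address from its bound netdev. */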
static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
{
	int ret;
	u8 i;

	for (i = 0; i < hr_dev->caps.num_ports; i++) {
		if (hr_dev->hw->set_mtu)
			hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
					    hr_dev->caps.max_mtu);
		ret = hns_roce_set_mac(hr_dev, i,
				       hr_dev->iboe.netdevs[i]->dev_addr);
		if (ret)
			return ret;
	}

	return 0;
}

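/* Fill ib_device_attr from the capability profile cached in hr_dev->caps. */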
static int hns_roce_query_device(struct ib_device *ib_dev,
				 struct ib_device_attr *props,
				 struct ib_udata *uhw)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

	memset(props, 0, sizeof(*props));

	props->fw_ver = hr_dev->caps.fw_ver;
	props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
	props->max_mr_size = (u64)(~(0ULL));
	props->page_size_cap = hr_dev->caps.page_size_cap;
	props->vendor_id = hr_dev->vendor_id;
	props->vendor_part_id = hr_dev->vendor_part_id;
	props->hw_ver = hr_dev->hw_rev;
	props->max_qp = hr_dev->caps.num_qps;
	props->max_qp_wr = hr_dev->caps.max_wqes;
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
				  IB_DEVICE_RC_RNR_NAK_GEN;
	props->max_send_sge = hr_dev->caps.max_sq_sg;
	props->max_recv_sge = hr_dev->caps.max_rq_sg;
	props->max_sge_rd = 1;
	props->max_cq = hr_dev->caps.num_cqs;
	props->max_cqe = hr_dev->caps.max_cqes;
	props->max_mr = hr_dev->caps.num_mtpts;
	props->max_pd = hr_dev->caps.num_pds;
	props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
	props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ?
			    IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->max_pkeys = 1;
	props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		props->max_srq = hr_dev->caps.num_srqs;
		props->max_srq_wr = hr_dev->caps.max_srq_wrs;
		props->max_srq_sge = hr_dev->caps.max_srq_sges;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) {
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
		props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
	}

	return 0;
}

static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
			       struct ib_port_attr *props)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
	struct device *dev = hr_dev->dev;
	struct net_device *net_dev;
	unsigned long flags;
	enum ib_mtu mtu;
	u8 port;

	port = port_num - 1;

	/* props being zeroed by the caller, avoid zeroing it here */

	props->max_mtu = hr_dev->caps.max_mtu;
	props->gid_tbl_len = hr_dev->caps.gid_table_len[port];
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				IB_PORT_VENDOR_CLASS_SUP |
				IB_PORT_BOOT_MGMT_SUP;
	props->max_msg_sz = HNS_ROCE_MAX_MSG_LEN;
	props->pkey_tbl_len = 1;
	props->active_width = IB_WIDTH_4X;
	props->active_speed = 1;

	spin_lock_irqsave(&hr_dev->iboe.lock, flags);

	net_dev = hr_dev->iboe.netdevs[port];
	if (!net_dev) {
		spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
		dev_err(dev, "Can't find netdev on port(%u)!\n", port);
		return -EINVAL;
	}

	mtu = iboe_get_mtu(net_dev->mtu);
	props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
	props->state = netif_running(net_dev) && netif_carrier_ok(net_dev) ?
			       IB_PORT_ACTIVE :
			       IB_PORT_DOWN;
	props->phys_state = props->state == IB_PORT_ACTIVE ?
				    IB_PORT_PHYS_STATE_LINK_UP :
				    IB_PORT_PHYS_STATE_DISABLED;

	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

	return 0;
}

static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
						    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index,
			       u16 *pkey)
{
	*pkey = PKEY_ID;

	return 0;
}

static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
				  struct ib_device_modify *props)
{
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		spin_lock_irqsave(&to_hr_dev(ib_dev)->sm_lock, flags);
		memcpy(ib_dev->node_desc, props->node_desc, NODE_DESC_SIZE);
		spin_unlock_irqrestore(&to_hr_dev(ib_dev)->sm_lock, flags);
	}

	return 0;
}

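/*
 * Create a user context: allocate a UAR for the process and return the
 * QP table size to userspace through the response structure.
 */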
static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
				   struct ib_udata *udata)
{
	int ret;
	struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
	struct hns_roce_ib_alloc_ucontext_resp resp = {};
	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);

	if (!hr_dev->active)
		return -EAGAIN;

	resp.qp_tab_size = hr_dev->caps.num_qps;

	ret = hns_roce_uar_alloc(hr_dev, &context->uar);
	if (ret)
		goto error_fail_uar_alloc;

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
		INIT_LIST_HEAD(&context->page_list);
		mutex_init(&context->page_mutex);
	}

	ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (ret)
		goto error_fail_copy_to_udata;

	return 0;

error_fail_copy_to_udata:
	hns_roce_uar_free(hr_dev, &context->uar);

error_fail_uar_alloc:
	return ret;
}

static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);

	hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar);
}

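/*
 * Userspace mmap offsets: vm_pgoff 0 maps the UAR doorbell page,
 * vm_pgoff 1 maps the TPTR area (only when the engine provides one).
 */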
static int hns_roce_mmap(struct ib_ucontext *context,
			 struct vm_area_struct *vma)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(context->device);

	switch (vma->vm_pgoff) {
	case 0:
		return rdma_user_mmap_io(context, vma,
					 to_hr_ucontext(context)->uar.pfn,
					 PAGE_SIZE,
					 pgprot_noncached(vma->vm_page_prot),
					 NULL);

	/* vm_pgoff: 1 -- TPTR */
	case 1:
		if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
			return -EINVAL;
		/*
		 * FIXME: using io_remap_pfn_range on the dma address returned
		 * by dma_alloc_coherent is totally wrong.
		 */
		return rdma_user_mmap_io(context, vma,
					 hr_dev->tptr_dma_addr >> PAGE_SHIFT,
					 hr_dev->tptr_size,
					 vma->vm_page_prot,
					 NULL);

	default:
		return -EINVAL;
	}
}

static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
				   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int ret;

	ret = ib_query_port(ib_dev, port_num, &attr);
	if (ret)
		return ret;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	if (to_hr_dev(ib_dev)->caps.flags & HNS_ROCE_CAP_FLAG_ROCE_V1_V2)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	return 0;
}

static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;

	hr_dev->active = false;
	unregister_netdevice_notifier(&iboe->nb);
	ib_unregister_device(&hr_dev->ib_dev);
}

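/* Verbs ops common to both hw versions; feature-specific ops follow. */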
static const struct ib_device_ops hns_roce_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_HNS,
	.uverbs_abi_ver = 1,
	.uverbs_no_driver_id_binding = 1,

	.add_gid = hns_roce_add_gid,
	.alloc_pd = hns_roce_alloc_pd,
	.alloc_ucontext = hns_roce_alloc_ucontext,
	.create_ah = hns_roce_create_ah,
	.create_cq = hns_roce_create_cq,
	.create_qp = hns_roce_create_qp,
	.dealloc_pd = hns_roce_dealloc_pd,
	.dealloc_ucontext = hns_roce_dealloc_ucontext,
	.del_gid = hns_roce_del_gid,
	.dereg_mr = hns_roce_dereg_mr,
	.destroy_ah = hns_roce_destroy_ah,
	.destroy_cq = hns_roce_destroy_cq,
	.disassociate_ucontext = hns_roce_disassociate_ucontext,
	.fill_res_cq_entry = hns_roce_fill_res_cq_entry,
	.get_dma_mr = hns_roce_get_dma_mr,
	.get_link_layer = hns_roce_get_link_layer,
	.get_port_immutable = hns_roce_port_immutable,
	.mmap = hns_roce_mmap,
	.modify_device = hns_roce_modify_device,
	.modify_qp = hns_roce_modify_qp,
	.query_ah = hns_roce_query_ah,
	.query_device = hns_roce_query_device,
	.query_pkey = hns_roce_query_pkey,
	.query_port = hns_roce_query_port,
	.reg_user_mr = hns_roce_reg_user_mr,

	INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
	INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
};

static const struct ib_device_ops hns_roce_dev_mr_ops = {
	.rereg_user_mr = hns_roce_rereg_user_mr,
};

static const struct ib_device_ops hns_roce_dev_mw_ops = {
	.alloc_mw = hns_roce_alloc_mw,
	.dealloc_mw = hns_roce_dealloc_mw,

	INIT_RDMA_OBJ_SIZE(ib_mw, hns_roce_mw, ibmw),
};

static const struct ib_device_ops hns_roce_dev_frmr_ops = {
	.alloc_mr = hns_roce_alloc_mr,
	.map_mr_sg = hns_roce_map_mr_sg,
};

static const struct ib_device_ops hns_roce_dev_srq_ops = {
	.create_srq = hns_roce_create_srq,
	.destroy_srq = hns_roce_destroy_srq,

	INIT_RDMA_OBJ_SIZE(ib_srq, hns_roce_srq, ibsrq),
};

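/*
 * Register the device with the RDMA core: advertise the supported
 * verbs, bind each port to its netdev, program MTU/MAC, and start
 * listening for netdev events.
 */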
static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct hns_roce_ib_iboe *iboe = NULL;
	struct ib_device *ib_dev = NULL;
	struct device *dev = hr_dev->dev;
	unsigned int i;

	iboe = &hr_dev->iboe;
	spin_lock_init(&iboe->lock);

	ib_dev = &hr_dev->ib_dev;

	ib_dev->node_type = RDMA_NODE_IB_CA;
	ib_dev->dev.parent = dev;

	ib_dev->phys_port_cnt = hr_dev->caps.num_ports;
	ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
	ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;
	ib_dev->uverbs_cmd_mask =
		(1ULL << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ULL << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ULL << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ULL << IB_USER_VERBS_CMD_REG_MR) |
		(1ULL << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ULL << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ULL << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ULL << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ULL << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ULL << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ULL << IB_USER_VERBS_CMD_DESTROY_QP);

	ib_dev->uverbs_ex_cmd_mask |= (1ULL << IB_USER_VERBS_EX_CMD_MODIFY_CQ);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR) {
		ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
		ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);
	}

	/* MW */
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW) {
		ib_dev->uverbs_cmd_mask |=
					(1ULL << IB_USER_VERBS_CMD_ALLOC_MW) |
					(1ULL << IB_USER_VERBS_CMD_DEALLOC_MW);
		ib_set_device_ops(ib_dev, &hns_roce_dev_mw_ops);
	}

	/* FRMR */
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
		ib_set_device_ops(ib_dev, &hns_roce_dev_frmr_ops);

	/* SRQ */
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ib_dev->uverbs_cmd_mask |=
				(1ULL << IB_USER_VERBS_CMD_CREATE_SRQ) |
				(1ULL << IB_USER_VERBS_CMD_MODIFY_SRQ) |
				(1ULL << IB_USER_VERBS_CMD_QUERY_SRQ) |
				(1ULL << IB_USER_VERBS_CMD_DESTROY_SRQ) |
				(1ULL << IB_USER_VERBS_CMD_POST_SRQ_RECV);
		ib_set_device_ops(ib_dev, &hns_roce_dev_srq_ops);
		ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_srq_ops);
	}

	ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
	ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
	for (i = 0; i < hr_dev->caps.num_ports; i++) {
		if (!hr_dev->iboe.netdevs[i])
			continue;

		ret = ib_device_set_netdev(ib_dev, hr_dev->iboe.netdevs[i],
					   i + 1);
		if (ret)
			return ret;
	}
	ret = ib_register_device(ib_dev, "hns_%d");
	if (ret) {
		dev_err(dev, "ib_register_device failed!\n");
		return ret;
	}

	ret = hns_roce_setup_mtu_mac(hr_dev);
	if (ret) {
		dev_err(dev, "setup_mtu_mac failed!\n");
		goto error_failed_setup_mtu_mac;
	}

	iboe->nb.notifier_call = hns_roce_netdev_event;
	ret = register_netdevice_notifier(&iboe->nb);
	if (ret) {
		dev_err(dev, "register_netdevice_notifier failed!\n");
		goto error_failed_setup_mtu_mac;
	}

	hr_dev->active = true;
	return 0;

error_failed_setup_mtu_mac:
	ib_unregister_device(ib_dev);

	return ret;
}

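/*
 * Create the HEM (Hardware Entry Memory) tables that back the hardware
 * contexts: MTPT, QPC, IRRL, TRRL, CQC, SRQC, SCCC and the QPC/CQC
 * timer tables.  Tables a given engine does not use are skipped.
 */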
static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct device *dev = hr_dev->dev;

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
				      HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
				      hr_dev->caps.num_mtpts, 1);
	if (ret) {
		dev_err(dev, "Failed to init MTPT context memory, aborting.\n");
		return ret;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
				      HEM_TYPE_QPC, hr_dev->caps.qpc_entry_sz,
				      hr_dev->caps.num_qps, 1);
	if (ret) {
		dev_err(dev, "Failed to init QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.irrl_table,
				      HEM_TYPE_IRRL,
				      hr_dev->caps.irrl_entry_sz *
				      hr_dev->caps.max_qp_init_rdma,
				      hr_dev->caps.num_qps, 1);
	if (ret) {
		dev_err(dev, "Failed to init irrl_table memory, aborting.\n");
		goto err_unmap_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->qp_table.trrl_table,
					      HEM_TYPE_TRRL,
					      hr_dev->caps.trrl_entry_sz *
					      hr_dev->caps.max_qp_dest_rdma,
					      hr_dev->caps.num_qps, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init trrl_table memory, aborting.\n");
			goto err_unmap_irrl;
		}
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
				      HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
				      hr_dev->caps.num_cqs, 1);
	if (ret) {
		dev_err(dev, "Failed to init CQ context memory, aborting.\n");
		goto err_unmap_trrl;
	}

	if (hr_dev->caps.srqc_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table,
					      HEM_TYPE_SRQC,
					      hr_dev->caps.srqc_entry_sz,
					      hr_dev->caps.num_srqs, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init SRQ context memory, aborting.\n");
			goto err_unmap_cq;
		}
	}

	if (hr_dev->caps.sccc_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->qp_table.sccc_table,
					      HEM_TYPE_SCCC,
					      hr_dev->caps.sccc_entry_sz,
					      hr_dev->caps.num_qps, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init SCC context memory, aborting.\n");
			goto err_unmap_srq;
		}
	}

	if (hr_dev->caps.qpc_timer_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
					      HEM_TYPE_QPC_TIMER,
					      hr_dev->caps.qpc_timer_entry_sz,
					      hr_dev->caps.num_qpc_timer, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init QPC timer memory, aborting.\n");
			goto err_unmap_ctx;
		}
	}

	if (hr_dev->caps.cqc_timer_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table,
					      HEM_TYPE_CQC_TIMER,
					      hr_dev->caps.cqc_timer_entry_sz,
					      hr_dev->caps.num_cqc_timer, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init CQC timer memory, aborting.\n");
			goto err_unmap_qpc_timer;
		}
	}

	return 0;

err_unmap_qpc_timer:
	if (hr_dev->caps.qpc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qpc_timer_table);

err_unmap_ctx:
	if (hr_dev->caps.sccc_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.sccc_table);
err_unmap_srq:
	if (hr_dev->caps.srqc_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table);

err_unmap_cq:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);

err_unmap_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.trrl_table);

err_unmap_irrl:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);

err_unmap_qp:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);

err_unmap_dmpt:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);

	return ret;
}

/**
 * hns_roce_setup_hca - setup host channel adapter
 * @hr_dev: pointer to hns roce device
 * Return: 0 on success, or a negative error code.
 */
static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct device *dev = hr_dev->dev;

	spin_lock_init(&hr_dev->sm_lock);
	spin_lock_init(&hr_dev->bt_cmd_lock);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
		INIT_LIST_HEAD(&hr_dev->pgdir_list);
		mutex_init(&hr_dev->pgdir_mutex);
	}

	ret = hns_roce_init_uar_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to initialize uar table. aborting\n");
		return ret;
	}

	ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
	if (ret) {
		dev_err(dev, "Failed to allocate priv_uar.\n");
		goto err_uar_table_free;
	}

	ret = hns_roce_init_pd_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init protected domain table.\n");
		goto err_uar_alloc_free;
	}

	ret = hns_roce_init_mr_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init memory region table.\n");
		goto err_pd_table_free;
	}

	ret = hns_roce_init_cq_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init completion queue table.\n");
		goto err_mr_table_free;
	}

	ret = hns_roce_init_qp_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init queue pair table.\n");
		goto err_cq_table_free;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ret = hns_roce_init_srq_table(hr_dev);
		if (ret) {
			dev_err(dev,
				"Failed to init share receive queue table.\n");
			goto err_qp_table_free;
		}
	}

	return 0;

err_qp_table_free:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		hns_roce_cleanup_qp_table(hr_dev);

err_cq_table_free:
	hns_roce_cleanup_cq_table(hr_dev);

err_mr_table_free:
	hns_roce_cleanup_mr_table(hr_dev);

err_pd_table_free:
	hns_roce_cleanup_pd_table(hr_dev);

err_uar_alloc_free:
	hns_roce_uar_free(hr_dev, &hr_dev->priv_uar);

err_uar_table_free:
	hns_roce_cleanup_uar_table(hr_dev);
	return ret;
}

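/*
 * Remember a CQ that has a completion handler on @cq_list so that
 * hns_roce_handle_device_err() can signal it; is_armed guards against
 * queueing the same CQ twice.
 */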
static void check_and_get_armed_cq(struct list_head *cq_list, struct ib_cq *cq)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(cq);
	unsigned long flags;

	spin_lock_irqsave(&hr_cq->lock, flags);
	if (cq->comp_handler) {
		if (!hr_cq->is_armed) {
			hr_cq->is_armed = 1;
			list_add_tail(&hr_cq->node, cq_list);
		}
	}
	spin_unlock_irqrestore(&hr_cq->lock, flags);
}

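/*
 * Called when the device becomes unusable: signal the CQs of every QP
 * that still has posted work, so that consumers blocked on completions
 * wake up and can observe the error.
 */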
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp *hr_qp;
	struct hns_roce_cq *hr_cq;
	struct list_head cq_list;
	unsigned long flags_qp;
	unsigned long flags;

	INIT_LIST_HEAD(&cq_list);

	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
	list_for_each_entry(hr_qp, &hr_dev->qp_list, node) {
		spin_lock_irqsave(&hr_qp->sq.lock, flags_qp);
		if (hr_qp->sq.tail != hr_qp->sq.head)
			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.send_cq);
		spin_unlock_irqrestore(&hr_qp->sq.lock, flags_qp);

		spin_lock_irqsave(&hr_qp->rq.lock, flags_qp);
		if ((!hr_qp->ibqp.srq) && (hr_qp->rq.tail != hr_qp->rq.head))
			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.recv_cq);
		spin_unlock_irqrestore(&hr_qp->rq.lock, flags_qp);
	}

	list_for_each_entry(hr_cq, &cq_list, node)
		hns_roce_cq_completion(hr_dev, hr_cq->cqn);

	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}

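/*
 * Bring-up sequence: engine reset, command queue, capability profile,
 * mailbox commands, event queues, HEM tables, software resource
 * tables, hardware init and finally IB registration; the error path
 * unwinds in reverse order.
 */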
int hns_roce_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct device *dev = hr_dev->dev;

	if (hr_dev->hw->reset) {
		ret = hr_dev->hw->reset(hr_dev, true);
		if (ret) {
			dev_err(dev, "Reset RoCE engine failed!\n");
			return ret;
		}
	}
	hr_dev->is_reset = false;

	if (hr_dev->hw->cmq_init) {
		ret = hr_dev->hw->cmq_init(hr_dev);
		if (ret) {
			dev_err(dev, "Init RoCE Command Queue failed!\n");
			goto error_failed_cmq_init;
		}
	}

	ret = hr_dev->hw->hw_profile(hr_dev);
	if (ret) {
		dev_err(dev, "Get RoCE engine profile failed!\n");
		goto error_failed_cmd_init;
	}

	ret = hns_roce_cmd_init(hr_dev);
	if (ret) {
		dev_err(dev, "cmd init failed!\n");
		goto error_failed_cmd_init;
	}

	/*
	 * EQ init depends on cmd being in poll mode; switching cmd to
	 * event mode depends on the EQs.
	 */
	ret = hr_dev->hw->init_eq(hr_dev);
	if (ret) {
		dev_err(dev, "eq init failed!\n");
		goto error_failed_eq_table;
	}

	if (hr_dev->cmd_mod) {
		ret = hns_roce_cmd_use_events(hr_dev);
		if (ret) {
			dev_warn(dev,
				 "Cmd event mode failed, set back to poll!\n");
			hns_roce_cmd_use_polling(hr_dev);
		}
	}

	ret = hns_roce_init_hem(hr_dev);
	if (ret) {
		dev_err(dev, "init HEM(Hardware Entry Memory) failed!\n");
		goto error_failed_init_hem;
	}

	ret = hns_roce_setup_hca(hr_dev);
	if (ret) {
		dev_err(dev, "setup hca failed!\n");
		goto error_failed_setup_hca;
	}

	if (hr_dev->hw->hw_init) {
		ret = hr_dev->hw->hw_init(hr_dev);
		if (ret) {
			dev_err(dev, "hw_init failed!\n");
			goto error_failed_engine_init;
		}
	}

	INIT_LIST_HEAD(&hr_dev->qp_list);
	spin_lock_init(&hr_dev->qp_list_lock);

	ret = hns_roce_register_device(hr_dev);
	if (ret)
		goto error_failed_register_device;

	return 0;

error_failed_register_device:
	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);

error_failed_engine_init:
	hns_roce_cleanup_bitmap(hr_dev);

error_failed_setup_hca:
	hns_roce_cleanup_hem(hr_dev);

error_failed_init_hem:
	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);
	hr_dev->hw->cleanup_eq(hr_dev);

error_failed_eq_table:
	hns_roce_cmd_cleanup(hr_dev);

error_failed_cmd_init:
	if (hr_dev->hw->cmq_exit)
		hr_dev->hw->cmq_exit(hr_dev);

error_failed_cmq_init:
	if (hr_dev->hw->reset) {
		if (hr_dev->hw->reset(hr_dev, false))
			dev_err(dev, "Dereset RoCE engine failed!\n");
	}

	return ret;
}

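/* Tear down everything hns_roce_init() set up, in reverse order. */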
void hns_roce_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_unregister_device(hr_dev);

	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);
	hns_roce_cleanup_bitmap(hr_dev);
	hns_roce_cleanup_hem(hr_dev);

	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);

	hr_dev->hw->cleanup_eq(hr_dev);
	hns_roce_cmd_cleanup(hr_dev);
	if (hr_dev->hw->cmq_exit)
		hr_dev->hw->cmq_exit(hr_dev);
	if (hr_dev->hw->reset)
		hr_dev->hw->reset(hr_dev, false);
}

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_DESCRIPTION("HNS RoCE Driver");