/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/module.h>
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/mutex.h>
#include <net/netlink.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_netlink.h>

#include "core_priv.h"
#include "cma_priv.h"
#include "restrack.h"

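/*
 * Policy used to parse and validate RDMA_NLDEV_ATTR_* attributes in all
 * of the doit/dumpit handlers below.
 */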
static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
	[RDMA_NLDEV_ATTR_DEV_INDEX]     = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DEV_NAME]	= { .type = NLA_NUL_STRING,
					    .len = IB_DEVICE_NAME_MAX - 1},
	[RDMA_NLDEV_ATTR_PORT_INDEX]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_FW_VERSION]	= { .type = NLA_NUL_STRING,
					    .len = IB_FW_VERSION_NAME_MAX - 1},
	[RDMA_NLDEV_ATTR_NODE_GUID]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SUBNET_PREFIX]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_LID]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_SM_LID]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_LMC]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_STATE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME] = { .type = NLA_NUL_STRING,
					     .len = 16 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_QP]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_QP_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_LQPN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQPN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQ_PSN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SQ_PSN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_TYPE]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_STATE]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_PID]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_KERN_NAME]		= { .type = NLA_NUL_STRING,
						    .len = TASK_COMM_LEN },
	[RDMA_NLDEV_ATTR_RES_CM_ID]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PS]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SRC_ADDR]	= {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_DST_ADDR]	= {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_CQ]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQ_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQE]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_USECNT]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_POLL_CTX]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_MR]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_MR_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_RKEY]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_LKEY]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_IOVA]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_MRLEN]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_PD]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PD_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_NDEV_INDEX]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_NDEV_NAME]		= { .type = NLA_NUL_STRING,
						    .len = IFNAMSIZ },
	[RDMA_NLDEV_ATTR_DRIVER]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_STRING]		= { .type = NLA_NUL_STRING,
				    .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN },
	[RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DRIVER_S32]		= { .type = NLA_S32 },
	[RDMA_NLDEV_ATTR_DRIVER_U32]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DRIVER_S64]		= { .type = NLA_S64 },
	[RDMA_NLDEV_ATTR_DRIVER_U64]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_PDN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_CQN]               = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_MRN]               = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_CM_IDN]            = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_CTXN]              = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_LINK_TYPE]		= { .type = NLA_NUL_STRING,
				    .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN },
};

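/*
 * Helpers for device drivers to add vendor specific <name, value> pairs
 * (RDMA_NLDEV_ATTR_DRIVER_* attributes) to a message. The optional print
 * type tells userspace how to render the value (e.g. as hex).
 */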
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
				      enum rdma_nldev_print_type print_type)
{
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name))
		return -EMSGSIZE;
	if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC &&
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type))
		return -EMSGSIZE;

	return 0;
}

static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
				   enum rdma_nldev_print_type print_type,
				   u32 value)
{
	if (put_driver_name_print_type(msg, name, print_type))
		return -EMSGSIZE;
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value))
		return -EMSGSIZE;

	return 0;
}

static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
				   enum rdma_nldev_print_type print_type,
				   u64 value)
{
	if (put_driver_name_print_type(msg, name, print_type))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
{
	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32);

int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
			       u32 value)
{
	return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex);

int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value)
{
	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64);

int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
{
	return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
				       value);
}
EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);

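/* Put the index and name that identify the device into the message. */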
static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
		return -EMSGSIZE;
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
			   dev_name(&device->dev)))
		return -EMSGSIZE;

	return 0;
}

static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
	char fw[IB_FW_VERSION_NAME_MAX];

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
		return -EMSGSIZE;

	BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
			      device->attrs.device_cap_flags,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	ib_get_device_fw_str(device, fw);
	/* Device without FW has strlen(fw) = 0 */
	if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
		return -EMSGSIZE;

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
			      be64_to_cpu(device->node_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
			      be64_to_cpu(device->attrs.sys_image_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
		return -EMSGSIZE;
	return 0;
}

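/*
 * Fill the attributes of a single port: for IB ports the capability
 * flags, subnet prefix, LIDs and LMC; always the port state and the
 * associated netdev when it belongs to the requesting net namespace.
 */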
static int fill_port_info(struct sk_buff *msg,
			  struct ib_device *device, u32 port,
			  const struct net *net)
{
	struct net_device *netdev = NULL;
	struct ib_port_attr attr;
	int ret;
	u64 cap_flags = 0;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
		return -EMSGSIZE;

	ret = ib_query_port(device, port, &attr);
	if (ret)
		return ret;

	if (rdma_protocol_ib(device, port)) {
		BUILD_BUG_ON((sizeof(attr.port_cap_flags) +
				sizeof(attr.port_cap_flags2)) > sizeof(u64));
		cap_flags = attr.port_cap_flags |
			((u64)attr.port_cap_flags2 << 32);
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
				      cap_flags, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
				      attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
			return -EMSGSIZE;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
			return -EMSGSIZE;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
		return -EMSGSIZE;

	netdev = ib_device_get_netdev(device, port);
	if (netdev && net_eq(dev_net(netdev), net)) {
		ret = nla_put_u32(msg,
				  RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
		if (ret)
			goto out;
		ret = nla_put_string(msg,
				     RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
	}

out:
	if (netdev)
		dev_put(netdev);
	return ret;
}

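/*
 * Build the RDMA_NLDEV_ATTR_RES_SUMMARY nest: one <name, current count>
 * entry per tracked resource type.
 */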
static int fill_res_info_entry(struct sk_buff *msg,
			       const char *name, u64 curr)
{
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
	if (!entry_attr)
		return -EMSGSIZE;

	if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
			      RDMA_NLDEV_ATTR_PAD))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;

err:
	nla_nest_cancel(msg, entry_attr);
	return -EMSGSIZE;
}

static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
{
	static const char * const names[RDMA_RESTRACK_MAX] = {
		[RDMA_RESTRACK_PD] = "pd",
		[RDMA_RESTRACK_CQ] = "cq",
		[RDMA_RESTRACK_QP] = "qp",
		[RDMA_RESTRACK_CM_ID] = "cm_id",
		[RDMA_RESTRACK_MR] = "mr",
		[RDMA_RESTRACK_CTX] = "ctx",
	};

	struct nlattr *table_attr;
	int ret, i, curr;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
	if (!table_attr)
		return -EMSGSIZE;

	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
		if (!names[i])
			continue;
		curr = rdma_restrack_count(device, i,
					   task_active_pid_ns(current));
		ret = fill_res_info_entry(msg, names[i], curr);
		if (ret)
			goto err;
	}

	nla_nest_end(msg, table_attr);
	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return ret;
}

static int fill_res_name_pid(struct sk_buff *msg,
			     struct rdma_restrack_entry *res)
{
	/*
	 * For user resources, the user should read /proc/PID/comm to get
	 * the name of the task.
	 */
	if (rdma_is_kernel_res(res)) {
		if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
		    res->kern_name))
			return -EMSGSIZE;
	} else {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID,
		    task_pid_vnr(res->task)))
			return -EMSGSIZE;
	}
	return 0;
}

static bool fill_res_entry(struct ib_device *dev, struct sk_buff *msg,
			   struct rdma_restrack_entry *res)
{
	if (!dev->ops.fill_res_entry)
		return false;
	return dev->ops.fill_res_entry(msg, res);
}

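/*
 * The fill_res_*_entry() helpers below dump a single tracked object each.
 * -EMSGSIZE means the message ran out of room; fill_res_qp_entry()
 * returns -EAGAIN when the QP doesn't match the requested port filter.
 */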
static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_qp *qp = container_of(res, struct ib_qp, res);
	struct ib_device *dev = qp->device;
	struct ib_qp_init_attr qp_init_attr;
	struct ib_qp_attr qp_attr;
	int ret;

	ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
	if (ret)
		return ret;

	if (port && port != qp_attr.port_num)
		return -EAGAIN;

	/* In create_qp() port is not set yet */
	if (qp_attr.port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp_attr.port_num))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num))
		goto err;
	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
				qp_attr.dest_qp_num))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
				qp_attr.rq_psn))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
		goto err;

	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
	    qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
			       qp_attr.path_mig_state))
			goto err;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
		goto err;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
		goto err;

	if (!rdma_is_kernel_res(res) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (fill_res_entry(dev, msg, res))
		goto err;

	return 0;

err:	return -EMSGSIZE;
}

static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin,
				struct rdma_restrack_entry *res, uint32_t port)
{
	struct rdma_id_private *id_priv =
				container_of(res, struct rdma_id_private, res);
	struct ib_device *dev = id_priv->id.device;
	struct rdma_cm_id *cm_id = &id_priv->id;

	if (port && port != cm_id->port_num)
		return 0;

	if (cm_id->port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
		goto err;

	if (id_priv->qp_num) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num))
			goto err;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
		goto err;

	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state))
		goto err;

	if (cm_id->route.addr.src_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR,
		    sizeof(cm_id->route.addr.src_addr),
		    &cm_id->route.addr.src_addr))
		goto err;
	if (cm_id->route.addr.dst_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR,
		    sizeof(cm_id->route.addr.dst_addr),
		    &cm_id->route.addr.dst_addr))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (fill_res_entry(dev, msg, res))
		goto err;

	return 0;

err: return -EMSGSIZE;
}

static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_cq *cq = container_of(res, struct ib_cq, res);
	struct ib_device *dev = cq->device;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
		goto err;

	/* Poll context is only valid for kernel CQs */
	if (rdma_is_kernel_res(res) &&
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id))
		goto err;
	if (!rdma_is_kernel_res(res) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
			cq->uobject->context->res.id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (fill_res_entry(dev, msg, res))
		goto err;

	return 0;

err:	return -EMSGSIZE;
}

static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_mr *mr = container_of(res, struct ib_mr, res);
	struct ib_device *dev = mr->pd->device;

	if (has_cap_net_admin) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
			goto err;
	}

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length,
			      RDMA_NLDEV_ATTR_PAD))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
		goto err;

	if (!rdma_is_kernel_res(res) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (fill_res_entry(dev, msg, res))
		goto err;

	return 0;

err:	return -EMSGSIZE;
}

static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_pd *pd = container_of(res, struct ib_pd, res);
	struct ib_device *dev = pd->device;

	if (has_cap_net_admin) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
				pd->local_dma_lkey))
			goto err;
		if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
				pd->unsafe_global_rkey))
			goto err;
	}
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id))
		goto err;

	if (!rdma_is_kernel_res(res) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
			pd->uobject->context->res.id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (fill_res_entry(dev, msg, res))
		goto err;

	return 0;

err:	return -EMSGSIZE;
}

static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);

	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);

	err = fill_dev_info(msg, device);
	if (err)
		goto err_free;

	nlmsg_end(msg, nlh);

	ib_device_put(device);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return err;
}

static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	u32 index;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
			  extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) {
		char name[IB_DEVICE_NAME_MAX] = {};

		nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
			    IB_DEVICE_NAME_MAX);
		err = ib_device_rename(device, name);
	}

	ib_device_put(device);
	return err;
}

static int _nldev_get_dumpit(struct ib_device *device,
			     struct sk_buff *skb,
			     struct netlink_callback *cb,
			     unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, NLM_F_MULTI);

	if (fill_dev_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

	idx++;

out:	cb->args[0] = idx;
	return skb->len;
}

static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	/*
	 * There is no need to take a lock because we are relying on
	 * ib_core's locking.
	 */
	return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
}

static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	u32 port;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err ||
	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		err = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);

	err = fill_port_info(msg, device, port, sock_net(skb->sk));
	if (err)
		goto err_free;

	nlmsg_end(msg, nlh);
	ib_device_put(device);

	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return err;
}

static int nldev_port_get_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	int start = cb->args[0];
	struct nlmsghdr *nlh;
	u32 idx = 0;
	u32 ifindex;
	int err;
	unsigned int p;

	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), ifindex);
	if (!device)
		return -EINVAL;

	rdma_for_each_port (device, p) {
		/*
		 * The dumpit function returns all information starting from a
		 * specific index. This index is taken from the netlink request
		 * sent by the user and is available in cb->args[0].
		 *
		 * Usually, the user doesn't fill this field, which causes
		 * everything to be returned.
		 */
		if (idx < start) {
			idx++;
			continue;
		}

		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq,
				RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
						 RDMA_NLDEV_CMD_PORT_GET),
				0, NLM_F_MULTI);

		if (fill_port_info(skb, device, p, sock_net(skb->sk))) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}
		idx++;
		nlmsg_end(skb, nlh);
	}

out:
	ib_device_put(device);
	cb->args[0] = idx;
	return skb->len;
}

static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	int ret;

	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
			0, 0);

	ret = fill_res_info(msg, device);
	if (ret)
		goto err_free;

	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return ret;
}

static int _nldev_res_get_dumpit(struct ib_device *device,
				 struct sk_buff *skb,
				 struct netlink_callback *cb,
				 unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
			0, NLM_F_MULTI);

	if (fill_res_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

	idx++;

out:
	cb->args[0] = idx;
	return skb->len;
}

static int nldev_res_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
}

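/*
 * Per resource-type dispatch data for the common res_get handlers: the
 * fill function, the netlink command and nest attributes to use, and the
 * attribute that carries the object id.
 */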
struct nldev_fill_res_entry {
	int (*fill_res_func)(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, u32 port);
	enum rdma_nldev_attr nldev_attr;
	enum rdma_nldev_command nldev_cmd;
	u8 flags;
	u32 entry;
	u32 id;
};

enum nldev_res_flags {
	NLDEV_PER_DEV = 1 << 0,
};

static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
	[RDMA_RESTRACK_QP] = {
		.fill_res_func = fill_res_qp_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_QP_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
		.entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_LQPN,
	},
	[RDMA_RESTRACK_CM_ID] = {
		.fill_res_func = fill_res_cm_id_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CM_ID_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
		.entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_CM_IDN,
	},
	[RDMA_RESTRACK_CQ] = {
		.fill_res_func = fill_res_cq_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CQ_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_CQN,
	},
	[RDMA_RESTRACK_MR] = {
		.fill_res_func = fill_res_mr_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_MR_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_MRN,
	},
	[RDMA_RESTRACK_PD] = {
		.fill_res_func = fill_res_pd_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_PD_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_PDN,
	},
};

static bool is_visible_in_pid_ns(struct rdma_restrack_entry *res)
{
	/*
	 * 1. Kernel resources should be visible in the init namespace only
	 * 2. Present only resources visible in the current namespace
	 */
	if (rdma_is_kernel_res(res))
		return task_active_pid_ns(current) == &init_pid_ns;
	return task_active_pid_ns(current) == task_active_pid_ns(res->task);
}

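/* Return a single tracked object selected by device, resource type and id. */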
static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack,
			       enum rdma_restrack_type res_type)
{
	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct rdma_restrack_entry *res;
	struct ib_device *device;
	u32 index, id, port = 0;
	bool has_cap_net_admin;
	struct sk_buff *msg;
	int ret;

	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
		if (!rdma_is_port_valid(device, port)) {
			ret = -EINVAL;
			goto err;
		}
	}

	if ((port && fe->flags & NLDEV_PER_DEV) ||
	    (!port && ~fe->flags & NLDEV_PER_DEV)) {
		ret = -EINVAL;
		goto err;
	}

	id = nla_get_u32(tb[fe->id]);
	res = rdma_restrack_get_byid(device, res_type, id);
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto err;
	}

	if (!is_visible_in_pid_ns(res)) {
		ret = -ENOENT;
		goto err_get;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
			0, 0);

	if (fill_nldev_handle(msg, device)) {
		ret = -EMSGSIZE;
		goto err_free;
	}

	has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
	ret = fe->fill_res_func(msg, has_cap_net_admin, res, port);
	rdma_restrack_put(res);
	if (ret)
		goto err_free;

	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err_get:
	rdma_restrack_put(res);
err:
	ib_device_put(device);
	return ret;
}

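/*
 * Dump all tracked objects of @res_type belonging to the device,
 * optionally filtered by port, resuming from the index in cb->args[0].
 */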
static int res_get_common_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 enum rdma_restrack_type res_type)
{
	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct rdma_restrack_entry *res;
	struct rdma_restrack_root *rt;
	int err, ret = 0, idx = 0;
	struct nlattr *table_attr;
	struct nlattr *entry_attr;
	struct ib_device *device;
	int start = cb->args[0];
	bool has_cap_net_admin;
	struct nlmsghdr *nlh;
	unsigned long id;
	u32 index, port = 0;
	bool filled = false;

	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	/*
	 * Right now, we expect the device index in order to get res
	 * information, but it is possible to extend this code to return all
	 * devices in one shot by checking the existence of
	 * RDMA_NLDEV_ATTR_DEV_INDEX. If it doesn't exist, we will iterate
	 * over all devices.
	 *
	 * But it is not needed for now.
	 */
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	/*
	 * If no PORT_INDEX is supplied, we will return all objects from
	 * that device.
	 */
	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
		if (!rdma_is_port_valid(device, port)) {
			ret = -EINVAL;
			goto err_index;
		}
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
			0, NLM_F_MULTI);

	if (fill_nldev_handle(skb, device)) {
		ret = -EMSGSIZE;
		goto err;
	}

	table_attr = nla_nest_start(skb, fe->nldev_attr);
	if (!table_attr) {
		ret = -EMSGSIZE;
		goto err;
	}

	has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN);

	rt = &device->res[res_type];
	xa_lock(&rt->xa);
	/*
	 * FIXME: if skipping ahead is something common, this loop should
	 * use xas_for_each & xas_pause to optimize, as we can have a lot
	 * of objects.
	 */
	xa_for_each(&rt->xa, id, res) {
		if (!is_visible_in_pid_ns(res))
			continue;

		if (idx < start || !rdma_restrack_get(res))
			goto next;

		xa_unlock(&rt->xa);

		filled = true;

		entry_attr = nla_nest_start(skb, fe->entry);
		if (!entry_attr) {
			ret = -EMSGSIZE;
			rdma_restrack_put(res);
			goto msg_full;
		}

		ret = fe->fill_res_func(skb, has_cap_net_admin, res, port);
		rdma_restrack_put(res);

		if (ret) {
			nla_nest_cancel(skb, entry_attr);
			if (ret == -EMSGSIZE)
				goto msg_full;
			if (ret == -EAGAIN)
				goto again;
			goto res_err;
		}
		nla_nest_end(skb, entry_attr);
again:		xa_lock(&rt->xa);
next:		idx++;
	}
	xa_unlock(&rt->xa);

msg_full:
	nla_nest_end(skb, table_attr);
	nlmsg_end(skb, nlh);
	cb->args[0] = idx;

	/*
	 * No more entries to fill: cancel the message and
	 * return 0 to mark the end of the dumpit.
	 */
	if (!filled)
		goto err;

	ib_device_put(device);
	return skb->len;

res_err:
	nla_nest_cancel(skb, table_attr);

err:
	nlmsg_cancel(skb, nlh);

err_index:
	ib_device_put(device);
	return ret;
}

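/*
 * Generate the per resource-type doit/dumpit wrappers around the common
 * implementations above.
 */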
#define RES_GET_FUNCS(name, type)                                              \
	static int nldev_res_get_##name##_dumpit(struct sk_buff *skb,          \
						 struct netlink_callback *cb)  \
	{                                                                      \
		return res_get_common_dumpit(skb, cb, type);                   \
	}                                                                      \
	static int nldev_res_get_##name##_doit(struct sk_buff *skb,            \
					       struct nlmsghdr *nlh,           \
					       struct netlink_ext_ack *extack) \
	{                                                                      \
		return res_get_common_doit(skb, nlh, extack, type);            \
	}

RES_GET_FUNCS(qp, RDMA_RESTRACK_QP);
RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID);
RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ);
RES_GET_FUNCS(pd, RDMA_RESTRACK_PD);
RES_GET_FUNCS(mr, RDMA_RESTRACK_MR);

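/*
 * Registry of "rdma link" providers, used by nldev_newlink() to create
 * a new RDMA device of the requested type on top of a netdev.
 */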
static LIST_HEAD(link_ops);
static DECLARE_RWSEM(link_ops_rwsem);

static const struct rdma_link_ops *link_ops_get(const char *type)
{
	const struct rdma_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->type, type))
			goto out;
	}
	ops = NULL;
out:
	return ops;
}

void rdma_link_register(struct rdma_link_ops *ops)
{
	down_write(&link_ops_rwsem);
	if (WARN_ON_ONCE(link_ops_get(ops->type)))
		goto out;
	list_add(&ops->list, &link_ops);
out:
	up_write(&link_ops_rwsem);
}
EXPORT_SYMBOL(rdma_link_register);

void rdma_link_unregister(struct rdma_link_ops *ops)
{
	down_write(&link_ops_rwsem);
	list_del(&ops->list);
	up_write(&link_ops_rwsem);
}
EXPORT_SYMBOL(rdma_link_unregister);

static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	char ibdev_name[IB_DEVICE_NAME_MAX];
	const struct rdma_link_ops *ops;
	char ndev_name[IFNAMSIZ];
	struct net_device *ndev;
	char type[IFNAMSIZ];
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
	    !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
		return -EINVAL;

	nla_strlcpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
		    sizeof(ibdev_name));
	if (strchr(ibdev_name, '%'))
		return -EINVAL;

	nla_strlcpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
	nla_strlcpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME],
		    sizeof(ndev_name));

	ndev = dev_get_by_name(&init_net, ndev_name);
	if (!ndev)
		return -ENODEV;

	down_read(&link_ops_rwsem);
	ops = link_ops_get(type);
#ifdef CONFIG_MODULES
	if (!ops) {
		up_read(&link_ops_rwsem);
		request_module("rdma-link-%s", type);
		down_read(&link_ops_rwsem);
		ops = link_ops_get(type);
	}
#endif
	err = ops ? ops->newlink(ibdev_name, ndev) : -EINVAL;
	up_read(&link_ops_rwsem);
	dev_put(ndev);

	return err;
}

static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	u32 index;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	if (!(device->attrs.device_cap_flags & IB_DEVICE_ALLOW_USER_UNREG)) {
		ib_device_put(device);
		return -EINVAL;
	}

	ib_unregister_device_and_put(device);
	return 0;
}

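/* Netlink operations registered for the RDMA_NL_NLDEV client. */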
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
	[RDMA_NLDEV_CMD_GET] = {
		.doit = nldev_get_doit,
		.dump = nldev_get_dumpit,
	},
	[RDMA_NLDEV_CMD_SET] = {
		.doit = nldev_set_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_NEWLINK] = {
		.doit = nldev_newlink,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_DELLINK] = {
		.doit = nldev_dellink,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_PORT_GET] = {
		.doit = nldev_port_get_doit,
		.dump = nldev_port_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_GET] = {
		.doit = nldev_res_get_doit,
		.dump = nldev_res_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_QP_GET] = {
		.doit = nldev_res_get_qp_doit,
		.dump = nldev_res_get_qp_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
		.doit = nldev_res_get_cm_id_doit,
		.dump = nldev_res_get_cm_id_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CQ_GET] = {
		.doit = nldev_res_get_cq_doit,
		.dump = nldev_res_get_cq_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_MR_GET] = {
		.doit = nldev_res_get_mr_doit,
		.dump = nldev_res_get_mr_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_PD_GET] = {
		.doit = nldev_res_get_pd_doit,
		.dump = nldev_res_get_pd_dumpit,
	},
};

void __init nldev_init(void)
{
	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}

void __exit nldev_exit(void)
{
	rdma_nl_unregister(RDMA_NL_NLDEV);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);