/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved. 
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: user_mad.c 4010 2005-11-09 23:11:56Z roland $
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/rwsem.h>
#include <linux/kref.h>

#include <asm/uaccess.h>
#include <asm/semaphore.h>

#include <rdma/ib_mad.h>
#include <rdma/ib_user_mad.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
	IB_UMAD_MAX_PORTS  = 64,
	IB_UMAD_MAX_AGENTS = 32,

	IB_UMAD_MAJOR      = 231,
	IB_UMAD_MINOR_BASE = 0
};

/*
 * Our lifetime rules for these structs are the following: each time a
 * device special file is opened, we look up the corresponding struct
 * ib_umad_port by minor in the umad_port[] table while holding the
 * port_lock.  If this lookup succeeds, we take a reference on the
 * ib_umad_port's struct ib_umad_device while still holding the
 * port_lock; if the lookup fails, we fail the open().  We drop these
 * references in the corresponding close().
 *
 * In addition to references coming from open character devices, there
 * is one more reference to each ib_umad_device representing the
 * module's reference taken when allocating the ib_umad_device in
 * ib_umad_add_one().
 *
 * When destroying an ib_umad_device, we clear all of its
 * ib_umad_ports from umad_port[] while holding port_lock before
 * dropping the module's reference to the ib_umad_device.  This is
 * always safe because any open() calls will either succeed and obtain
 * a reference before we clear the umad_port[] entries, or fail after
 * we clear the umad_port[] entries.
 */

struct ib_umad_port {
	struct cdev           *dev;
	struct class_device   *class_dev;

	struct cdev           *sm_dev;
	struct class_device   *sm_class_dev;
	struct semaphore       sm_sem;

	struct rw_semaphore    mutex;
	struct list_head       file_list;

	struct ib_device      *ib_dev;
	struct ib_umad_device *umad_dev;
	int                    dev_num;
	u8                     port_num;
};

struct ib_umad_device {
	int                  start_port, end_port;
	struct kref          ref;
	struct ib_umad_port  port[0];
};

struct ib_umad_file {
	struct ib_umad_port    *port;
	struct list_head	recv_list;
	struct list_head	port_list;
	spinlock_t		recv_lock;
	wait_queue_head_t	recv_wait;
	struct ib_mad_agent    *agent[IB_UMAD_MAX_AGENTS];
	int			agents_dead;
};

struct ib_umad_packet {
	struct ib_mad_send_buf *msg;
	struct list_head   list;
	int		   length;
	struct ib_user_mad mad;
};

static struct class *umad_class;

static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);

static DEFINE_SPINLOCK(port_lock);
static struct ib_umad_port *umad_port[IB_UMAD_MAX_PORTS];
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS * 2);

static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device);

static void ib_umad_release_dev(struct kref *ref)
{
	struct ib_umad_device *dev =
		container_of(ref, struct ib_umad_device, ref);

	kfree(dev);
}

/* caller must hold port->mutex at least for reading */
static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
{
	return file->agents_dead ? NULL : file->agent[id];
}

static int queue_packet(struct ib_umad_file *file,
			struct ib_mad_agent *agent,
			struct ib_umad_packet *packet)
{
	int ret = 1;

	down_read(&file->port->mutex);

	for (packet->mad.hdr.id = 0;
	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
	     packet->mad.hdr.id++)
		if (agent == __get_agent(file, packet->mad.hdr.id)) {
			spin_lock_irq(&file->recv_lock);
			list_add_tail(&packet->list, &file->recv_list);
			spin_unlock_irq(&file->recv_lock);
			wake_up_interruptible(&file->recv_wait);
			ret = 0;
			break;
		}

	up_read(&file->port->mutex);

	return ret;
}

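/*
 * Send completion handler: release the address handle and the send
 * buffer.  If the send timed out waiting for a response, queue a
 * header-only copy of the MAD with status ETIMEDOUT so that userspace
 * sees the failure on its next read().
 */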
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *send_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *timeout;
	struct ib_umad_packet *packet = send_wc->send_buf->context[0];

	ib_destroy_ah(packet->msg->ah);
	ib_free_send_mad(packet->msg);

	if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
		timeout = kzalloc(sizeof *timeout + IB_MGMT_MAD_HDR, GFP_KERNEL);
		if (!timeout)
			goto out;

		timeout->length 	= IB_MGMT_MAD_HDR;
		timeout->mad.hdr.id 	= packet->mad.hdr.id;
		timeout->mad.hdr.status = ETIMEDOUT;
		memcpy(timeout->mad.data, packet->mad.data,
		       sizeof (struct ib_mad_hdr));

		if (queue_packet(file, agent, timeout))
			kfree(timeout);
	}
out:
	kfree(packet);
}

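/*
 * Receive completion handler: coalesce the received MAD (including any
 * RMPP segments) into a new packet and queue it on the owning file's
 * receive list, waking any reader blocked in ib_umad_read().
 */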
static void recv_handler(struct ib_mad_agent *agent,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet;
	int length;

	if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
		goto out;

	length = mad_recv_wc->mad_len;
	packet = kzalloc(sizeof *packet + length, GFP_KERNEL);
	if (!packet)
		goto out;

	packet->length = length;

	ib_coalesce_recv_mad(mad_recv_wc, packet->mad.data);

	packet->mad.hdr.status    = 0;
	packet->mad.hdr.length    = length + sizeof (struct ib_user_mad);
	packet->mad.hdr.qpn 	  = cpu_to_be32(mad_recv_wc->wc->src_qp);
	packet->mad.hdr.lid 	  = cpu_to_be16(mad_recv_wc->wc->slid);
	packet->mad.hdr.sl  	  = mad_recv_wc->wc->sl;
	packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
	packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
	if (packet->mad.hdr.grh_present) {
		/* XXX parse GRH */
		packet->mad.hdr.gid_index 	= 0;
		packet->mad.hdr.hop_limit 	= 0;
		packet->mad.hdr.traffic_class	= 0;
		memset(packet->mad.hdr.gid, 0, 16);
		packet->mad.hdr.flow_label	= 0;
	}

	if (queue_packet(file, agent, packet))
		kfree(packet);

out:
	ib_free_recv_mad(mad_recv_wc);
}

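/*
 * read() hands the next queued MAD to userspace, prefixed by its
 * struct ib_user_mad header.  If the buffer is too small, the header
 * (which carries the full length) and the first MAD segment are still
 * copied out, the packet is requeued, and -ENOSPC is returned.
 */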
static ssize_t ib_umad_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	ssize_t ret;

	if (count < sizeof (struct ib_user_mad) + sizeof (struct ib_mad))
		return -EINVAL;

	spin_lock_irq(&file->recv_lock);

	while (list_empty(&file->recv_list)) {
		spin_unlock_irq(&file->recv_lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->recv_wait,
					     !list_empty(&file->recv_list)))
			return -ERESTARTSYS;

		spin_lock_irq(&file->recv_lock);
	}

	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
	list_del(&packet->list);

	spin_unlock_irq(&file->recv_lock);

	if (count < packet->length + sizeof (struct ib_user_mad)) {
		/* Return length needed (and first RMPP segment) if too small */
		if (copy_to_user(buf, &packet->mad,
				 sizeof (struct ib_user_mad) + sizeof (struct ib_mad)))
			ret = -EFAULT;
		else
			ret = -ENOSPC;
	} else if (copy_to_user(buf, &packet->mad,
				packet->length + sizeof (struct ib_user_mad)))
		ret = -EFAULT;
	else
		ret = packet->length + sizeof (struct ib_user_mad);
	if (ret < 0) {
		/* Requeue packet */
		spin_lock_irq(&file->recv_lock);
		list_add(&packet->list, &file->recv_list);
		spin_unlock_irq(&file->recv_lock);
	} else
		kfree(packet);
	return ret;
}

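/*
 * write() sends a single MAD.  The ib_user_mad header supplied by
 * userspace describes the destination: an address handle is built from
 * it, the payload is copied into a newly allocated send buffer, and for
 * request methods the high 32 bits of the TID are replaced with the
 * agent's hi_tid so the eventual response is routed back to this agent.
 */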
static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	struct ib_mad_agent *agent;
	struct ib_ah_attr ah_attr;
	struct ib_ah *ah;
	struct ib_rmpp_mad *rmpp_mad;
	u8 method;
	__be64 *tid;
	int ret, length, hdr_len, copy_offset;
	int rmpp_active, has_rmpp_header;

	if (count < sizeof (struct ib_user_mad) + IB_MGMT_RMPP_HDR)
		return -EINVAL;

	length = count - sizeof (struct ib_user_mad);
	packet = kmalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
	if (!packet)
		return -ENOMEM;

	if (copy_from_user(&packet->mad, buf,
			    sizeof (struct ib_user_mad) + IB_MGMT_RMPP_HDR)) {
		ret = -EFAULT;
		goto err;
	}

	if (packet->mad.hdr.id < 0 ||
	    packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
		ret = -EINVAL;
		goto err;
	}

	down_read(&file->port->mutex);

	agent = __get_agent(file, packet->mad.hdr.id);
	if (!agent) {
		ret = -EINVAL;
		goto err_up;
	}

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid          = be16_to_cpu(packet->mad.hdr.lid);
	ah_attr.sl            = packet->mad.hdr.sl;
	ah_attr.src_path_bits = packet->mad.hdr.path_bits;
	ah_attr.port_num      = file->port->port_num;
	if (packet->mad.hdr.grh_present) {
		ah_attr.ah_flags = IB_AH_GRH;
		memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
		ah_attr.grh.flow_label 	   = be32_to_cpu(packet->mad.hdr.flow_label);
		ah_attr.grh.hop_limit  	   = packet->mad.hdr.hop_limit;
		ah_attr.grh.traffic_class  = packet->mad.hdr.traffic_class;
	}

	ah = ib_create_ah(agent->qp->pd, &ah_attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_up;
	}

	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
	if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
		hdr_len = IB_MGMT_SA_HDR;
		copy_offset = IB_MGMT_RMPP_HDR;
		has_rmpp_header = 1;
	} else if (rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START &&
		   rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END) {
			hdr_len = IB_MGMT_VENDOR_HDR;
			copy_offset = IB_MGMT_RMPP_HDR;
			has_rmpp_header = 1;
	} else {
		hdr_len = IB_MGMT_MAD_HDR;
		copy_offset = IB_MGMT_MAD_HDR;
		has_rmpp_header = 0;
	}

	if (has_rmpp_header)
		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
			      IB_MGMT_RMPP_FLAG_ACTIVE;
	else
		rmpp_active = 0;

	/* Validate that the management class can support RMPP */
	if (rmpp_active && !agent->rmpp_version) {
		ret = -EINVAL;
		goto err_ah;
	}

	packet->msg = ib_create_send_mad(agent,
					 be32_to_cpu(packet->mad.hdr.qpn),
					 0, rmpp_active,
					 hdr_len, length - hdr_len,
					 GFP_KERNEL);
	if (IS_ERR(packet->msg)) {
		ret = PTR_ERR(packet->msg);
		goto err_ah;
	}

	packet->msg->ah 	= ah;
	packet->msg->timeout_ms = packet->mad.hdr.timeout_ms;
	packet->msg->retries 	= packet->mad.hdr.retries;
	packet->msg->context[0] = packet;

	/* Copy MAD headers (RMPP header in place) */
	memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR);
	/* Now, copy rest of message from user into send buffer */
	if (copy_from_user(packet->msg->mad + copy_offset,
			   buf + sizeof (struct ib_user_mad) + copy_offset,
			   length - copy_offset)) {
		ret = -EFAULT;
		goto err_msg;
	}

	/*
	 * If userspace is generating a request that will generate a
	 * response, we need to make sure the high-order part of the
	 * transaction ID matches the agent being used to send the
	 * MAD.
	 */
	method = ((struct ib_mad_hdr *) packet->msg->mad)->method;

	if (!(method & IB_MGMT_METHOD_RESP)       &&
	    method != IB_MGMT_METHOD_TRAP_REPRESS &&
	    method != IB_MGMT_METHOD_SEND) {
		tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
				   (be64_to_cpup(tid) & 0xffffffff));
	}

	ret = ib_post_send_mad(packet->msg, NULL);
	if (ret)
		goto err_msg;

	up_read(&file->port->mutex);

	return count;

err_msg:
	ib_free_send_mad(packet->msg);

err_ah:
	ib_destroy_ah(ah);

err_up:
	up_read(&file->port->mutex);

err:
	kfree(packet);
	return ret;
}

static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ib_umad_file *file = filp->private_data;

	/* we will always be able to post a MAD send */
	unsigned int mask = POLLOUT | POLLWRNORM;

	poll_wait(filp, &file->recv_wait, wait);

	if (!list_empty(&file->recv_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

static int ib_umad_reg_agent(struct ib_umad_file *file, unsigned long arg)
{
	struct ib_user_mad_reg_req ureq;
	struct ib_mad_reg_req req;
	struct ib_mad_agent *agent;
	int agent_id;
	int ret;

	down_write(&file->port->mutex);

	if (!file->port->ib_dev) {
		ret = -EPIPE;
		goto out;
	}

	if (copy_from_user(&ureq, (void __user *) arg, sizeof ureq)) {
		ret = -EFAULT;
		goto out;
	}

	if (ureq.qpn != 0 && ureq.qpn != 1) {
		ret = -EINVAL;
		goto out;
	}

	for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
		if (!__get_agent(file, agent_id))
			goto found;

	ret = -ENOMEM;
	goto out;

found:
	if (ureq.mgmt_class) {
		req.mgmt_class         = ureq.mgmt_class;
		req.mgmt_class_version = ureq.mgmt_class_version;
		memcpy(req.method_mask, ureq.method_mask, sizeof req.method_mask);
		memcpy(req.oui,         ureq.oui,         sizeof req.oui);
	}

	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
				      ureq.rmpp_version,
				      send_handler, recv_handler, file);
	if (IS_ERR(agent)) {
		ret = PTR_ERR(agent);
		goto out;
	}

	if (put_user(agent_id,
		     (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) {
		ret = -EFAULT;
		ib_unregister_mad_agent(agent);
		goto out;
	}

	file->agent[agent_id] = agent;
	ret = 0;

out:
	up_write(&file->port->mutex);
	return ret;
}

static int ib_umad_unreg_agent(struct ib_umad_file *file, unsigned long arg)
{
	struct ib_mad_agent *agent = NULL;
	u32 id;
	int ret = 0;

	if (get_user(id, (u32 __user *) arg))
		return -EFAULT;

	down_write(&file->port->mutex);

	if (id < 0 || id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
		ret = -EINVAL;
		goto out;
	}

	agent = file->agent[id];
	file->agent[id] = NULL;

out:
	up_write(&file->port->mutex);

	if (agent)
		ib_unregister_mad_agent(agent);

	return ret;
}

static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
		return ib_umad_reg_agent(filp->private_data, arg);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, arg);
	default:
		return -ENOIOCTLCMD;
	}
}

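/*
 * open() on a umad device: look up the port by minor number under
 * port_lock, take a reference on its ib_umad_device, and allocate the
 * per-file state that tracks registered agents and queued packets.
 */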
static int ib_umad_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_umad_file *file;
	int ret = 0;

	spin_lock(&port_lock);
	port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE];
	if (port)
		kref_get(&port->umad_dev->ref);
	spin_unlock(&port_lock);

	if (!port)
		return -ENXIO;

	down_write(&port->mutex);

	if (!port->ib_dev) {
		ret = -ENXIO;
		goto out;
	}

	file = kzalloc(sizeof *file, GFP_KERNEL);
	if (!file) {
		kref_put(&port->umad_dev->ref, ib_umad_release_dev);
		ret = -ENOMEM;
		goto out;
	}

	spin_lock_init(&file->recv_lock);
	INIT_LIST_HEAD(&file->recv_list);
	init_waitqueue_head(&file->recv_wait);

	file->port = port;
	filp->private_data = file;

	list_add_tail(&file->port_list, &port->file_list);

out:
	up_write(&port->mutex);
	return ret;
L

static int ib_umad_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_device *dev = file->port->umad_dev;
	struct ib_umad_packet *packet, *tmp;
	int already_dead;
	int i;

	down_write(&file->port->mutex);

	already_dead = file->agents_dead;
	file->agents_dead = 1;

	list_for_each_entry_safe(packet, tmp, &file->recv_list, list)
		kfree(packet);

	list_del(&file->port_list);

	downgrade_write(&file->port->mutex);

	if (!already_dead)
		for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
			if (file->agent[i])
				ib_unregister_mad_agent(file->agent[i]);

	up_read(&file->port->mutex);

	kfree(file);
	kref_put(&dev->ref, ib_umad_release_dev);

	return 0;
}

static struct file_operations umad_fops = {
	.owner 	 	= THIS_MODULE,
	.read 	 	= ib_umad_read,
	.write 	 	= ib_umad_write,
	.poll 	 	= ib_umad_poll,
	.unlocked_ioctl = ib_umad_ioctl,
	.compat_ioctl 	= ib_umad_ioctl,
	.open 	 	= ib_umad_open,
	.release 	= ib_umad_close
};

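/*
 * open() on an issm device: only one opener is allowed at a time
 * (serialized by sm_sem), and the port's IB_PORT_SM capability bit
 * stays set for as long as the file is held open.
 */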
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_port_modify props = {
		.set_port_cap_mask = IB_PORT_SM
	};
	int ret;

	spin_lock(&port_lock);
	port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE - IB_UMAD_MAX_PORTS];
	if (port)
		kref_get(&port->umad_dev->ref);
	spin_unlock(&port_lock);

	if (!port)
		return -ENXIO;

	if (filp->f_flags & O_NONBLOCK) {
		if (down_trylock(&port->sm_sem)) {
			ret = -EAGAIN;
			goto fail;
		}
	} else {
		if (down_interruptible(&port->sm_sem)) {
			ret = -ERESTARTSYS;
			goto fail;
		}
	}

	ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	if (ret) {
		up(&port->sm_sem);
		goto fail;
	}

	filp->private_data = port;

	return 0;

fail:
	kref_put(&port->umad_dev->ref, ib_umad_release_dev);
	return ret;
}

static int ib_umad_sm_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port = filp->private_data;
	struct ib_port_modify props = {
		.clr_port_cap_mask = IB_PORT_SM
	};
	int ret = 0;

	down_write(&port->mutex);
	if (port->ib_dev)
		ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	up_write(&port->mutex);

	up(&port->sm_sem);

	kref_put(&port->umad_dev->ref, ib_umad_release_dev);

	return ret;
}

static struct file_operations umad_sm_fops = {
	.owner 	 = THIS_MODULE,
	.open 	 = ib_umad_sm_open,
	.release = ib_umad_sm_close
};

static struct ib_client umad_client = {
	.name   = "umad",
	.add    = ib_umad_add_one,
	.remove = ib_umad_remove_one
};

static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{
	struct ib_umad_port *port = class_get_devdata(class_dev);

	if (!port)
		return -ENODEV;

	return sprintf(buf, "%s\n", port->ib_dev->name);
}
static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct class_device *class_dev, char *buf)
{
	struct ib_umad_port *port = class_get_devdata(class_dev);

	if (!port)
		return -ENODEV;

	return sprintf(buf, "%d\n", port->port_num);
}
static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static ssize_t show_abi_version(struct class *class, char *buf)
{
	return sprintf(buf, "%d\n", IB_USER_MAD_ABI_VERSION);
}
static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

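/*
 * Set up one port: allocate a minor from dev_map, create the umad%d
 * and issm%d character and class devices, and publish the port in
 * umad_port[] only after everything has been created successfully.
 */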
static int ib_umad_init_port(struct ib_device *device, int port_num,
			     struct ib_umad_port *port)
{
	spin_lock(&port_lock);
	port->dev_num = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
	if (port->dev_num >= IB_UMAD_MAX_PORTS) {
		spin_unlock(&port_lock);
		return -1;
	}
	set_bit(port->dev_num, dev_map);
	spin_unlock(&port_lock);

	port->ib_dev   = device;
	port->port_num = port_num;
	init_MUTEX(&port->sm_sem);
	init_rwsem(&port->mutex);
	INIT_LIST_HEAD(&port->file_list);

	port->dev = cdev_alloc();
	if (!port->dev)
		return -1;
	port->dev->owner = THIS_MODULE;
	port->dev->ops   = &umad_fops;
	kobject_set_name(&port->dev->kobj, "umad%d", port->dev_num);
	if (cdev_add(port->dev, base_dev + port->dev_num, 1))
		goto err_cdev;

	port->class_dev = class_device_create(umad_class, NULL, port->dev->dev,
					      device->dma_device,
					      "umad%d", port->dev_num);
	if (IS_ERR(port->class_dev))
		goto err_cdev;

	if (class_device_create_file(port->class_dev, &class_device_attr_ibdev))
		goto err_class;
	if (class_device_create_file(port->class_dev, &class_device_attr_port))
		goto err_class;

	port->sm_dev = cdev_alloc();
	if (!port->sm_dev)
		goto err_class;
	port->sm_dev->owner = THIS_MODULE;
	port->sm_dev->ops   = &umad_sm_fops;
	kobject_set_name(&port->sm_dev->kobj, "issm%d", port->dev_num);
	if (cdev_add(port->sm_dev, base_dev + port->dev_num + IB_UMAD_MAX_PORTS, 1))
		goto err_sm_cdev;

	port->sm_class_dev = class_device_create(umad_class, NULL, port->sm_dev->dev,
						 device->dma_device,
						 "issm%d", port->dev_num);
	if (IS_ERR(port->sm_class_dev))
		goto err_sm_cdev;

	class_set_devdata(port->class_dev,    port);
	class_set_devdata(port->sm_class_dev, port);

	if (class_device_create_file(port->sm_class_dev, &class_device_attr_ibdev))
		goto err_sm_class;
	if (class_device_create_file(port->sm_class_dev, &class_device_attr_port))
		goto err_sm_class;

	spin_lock(&port_lock);
	umad_port[port->dev_num] = port;
	spin_unlock(&port_lock);

	return 0;

err_sm_class:
	class_device_destroy(umad_class, port->sm_dev->dev);

err_sm_cdev:
	cdev_del(port->sm_dev);

err_class:
	class_device_destroy(umad_class, port->dev->dev);

err_cdev:
	cdev_del(port->dev);
	clear_bit(port->dev_num, dev_map);

	return -1;
}

static void ib_umad_kill_port(struct ib_umad_port *port)
{
	struct ib_umad_file *file;
	int id;

	class_set_devdata(port->class_dev,    NULL);
	class_set_devdata(port->sm_class_dev, NULL);

	class_device_destroy(umad_class, port->dev->dev);
	class_device_destroy(umad_class, port->sm_dev->dev);

	cdev_del(port->dev);
	cdev_del(port->sm_dev);

	spin_lock(&port_lock);
	umad_port[port->dev_num] = NULL;
	spin_unlock(&port_lock);

	down_write(&port->mutex);

	port->ib_dev = NULL;

	/*
	 * Now go through the list of files attached to this port and
	 * unregister all of their MAD agents.  We need to hold
	 * port->mutex while doing this to avoid racing with
	 * ib_umad_close(), but we can't hold the mutex for writing
	 * while calling ib_unregister_mad_agent(), since that might
	 * deadlock by calling back into queue_packet().  So we
	 * downgrade our lock to a read lock, and then drop and
	 * reacquire the write lock for the next iteration.
	 *
	 * We do list_del_init() on the file's list_head so that the
	 * list_del in ib_umad_close() is still OK, even after the
	 * file is removed from the list.
	 */
	while (!list_empty(&port->file_list)) {
		file = list_entry(port->file_list.next, struct ib_umad_file,
				  port_list);

		file->agents_dead = 1;
		list_del_init(&file->port_list);

		downgrade_write(&port->mutex);

		for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
			if (file->agent[id])
				ib_unregister_mad_agent(file->agent[id]);

		up_read(&port->mutex);
		down_write(&port->mutex);
	}

	up_write(&port->mutex);

	clear_bit(port->dev_num, dev_map);
}

static void ib_umad_add_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev;
	int s, e, i;

	if (device->node_type == IB_NODE_SWITCH)
		s = e = 0;
	else {
		s = 1;
		e = device->phys_port_cnt;
	}

	umad_dev = kzalloc(sizeof *umad_dev +
			   (e - s + 1) * sizeof (struct ib_umad_port),
			   GFP_KERNEL);
	if (!umad_dev)
		return;

	kref_init(&umad_dev->ref);

	umad_dev->start_port = s;
	umad_dev->end_port   = e;

	for (i = s; i <= e; ++i) {
		umad_dev->port[i - s].umad_dev = umad_dev;

		if (ib_umad_init_port(device, i, &umad_dev->port[i - s]))
			goto err;
	}

	ib_set_client_data(device, &umad_client, umad_dev);

	return;

err:
	while (--i >= s)
		ib_umad_kill_port(&umad_dev->port[i - s]);

	kref_put(&umad_dev->ref, ib_umad_release_dev);
}

static void ib_umad_remove_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev = ib_get_client_data(device, &umad_client);
	int i;

	if (!umad_dev)
		return;

	for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i)
		ib_umad_kill_port(&umad_dev->port[i]);

	kref_put(&umad_dev->ref, ib_umad_release_dev);
}

static int __init ib_umad_init(void)
{
	int ret;

	ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2,
				     "infiniband_mad");
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't register device number\n");
		goto out;
	}

	umad_class = class_create(THIS_MODULE, "infiniband_mad");
	if (IS_ERR(umad_class)) {
		ret = PTR_ERR(umad_class);
		printk(KERN_ERR "user_mad: couldn't create class infiniband_mad\n");
		goto out_chrdev;
	}

	ret = class_create_file(umad_class, &class_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = ib_register_client(&umad_client);
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't register ib_umad client\n");
		goto out_class;
	}

	return 0;

out_class:
	class_destroy(umad_class);

out_chrdev:
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);

out:
	return ret;
}

static void __exit ib_umad_cleanup(void)
{
	ib_unregister_client(&umad_client);
	class_destroy(umad_class);
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
}

module_init(ib_umad_init);
module_exit(ib_umad_cleanup);