/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
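/*
 * Rough sketch of how user space drives this interface (error handling
 * omitted; the device path and the exact sequence of calls depend on
 * the application):
 *
 *	int fd = open("/dev/fw0", O_RDWR);
 *	struct fw_cdev_get_info info = { .version = FW_CDEV_VERSION };
 *	ioctl(fd, FW_CDEV_IOC_GET_INFO, &info);
 *
 * Further FW_CDEV_IOC_* calls register resources and start I/O; read()
 * on the file descriptor then delivers the resulting fw_cdev_event_*
 * structures defined in <linux/firewire-cdev.h>.
 */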

#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include <asm/system.h>

#include "core.h"

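/*
 * Per-open-file state.  One instance is allocated in fw_device_op_open()
 * and freed through client_put() once the file and all other reference
 * holders are done with it.
 */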
struct client {
	u32 version;
	struct fw_device *device;

	spinlock_t lock;
	bool in_shutdown;
	struct idr resource_idr;
	struct list_head event_list;
	wait_queue_head_t wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;

	struct list_head link;
	struct kref kref;
};

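/*
 * References to a client are held by the open file itself, by each
 * entry in resource_idr, by each pending outbound transaction callback,
 * and by each scheduled iso_resource work item.
 */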
static inline void client_get(struct client *client)
{
	kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
	struct client *client = container_of(kref, struct client, kref);

	fw_device_put(client->device);
	kfree(client);
}

static void client_put(struct client *client)
{
	kref_put(&client->kref, client_release);
}

struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);
struct client_resource {
	client_resource_release_fn_t release;
	int handle;
};

struct address_handler_resource {
	struct client_resource resource;
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
};

struct outbound_transaction_resource {
	struct client_resource resource;
	struct fw_transaction transaction;
};

struct inbound_transaction_resource {
	struct client_resource resource;
	struct fw_request *request;
	void *data;
	size_t length;
};

struct descriptor_resource {
	struct client_resource resource;
	struct fw_descriptor descriptor;
	u32 data[0];
};

struct iso_resource {
	struct client_resource resource;
	struct client *client;
	/* Schedule work and access todo only with client->lock held. */
	struct delayed_work work;
	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
	int generation;
	u64 channels;
	s32 bandwidth;
	__be32 transaction_data[2];
	struct iso_resource_event *e_alloc, *e_dealloc;
};

static void release_iso_resource(struct client *, struct client_resource *);

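/*
 * Takes a client reference on behalf of the work item; if the work was
 * already pending, the reference is dropped again immediately.
 * iso_resource_work() drops it when the work item finishes.
 */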
static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
	client_get(r->client);
	if (!schedule_delayed_work(&r->work, delay))
		client_put(r->client);
}

static void schedule_if_iso_resource(struct client_resource *resource)
{
	if (resource->release == release_iso_resource)
		schedule_iso_resource(container_of(resource,
					struct iso_resource, resource), 0);
}

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct bus_reset_event {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
	struct event event;
	struct client *client;
	struct outbound_transaction_resource r;
	struct fw_cdev_event_response response;
};

struct inbound_transaction_event {
	struct event event;
	struct fw_cdev_event_request request;
};

struct iso_interrupt_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_resource_event {
	struct event event;
	struct fw_cdev_event_iso_resource iso_resource;
};

static inline void __user *u64_to_uptr(__u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline __u64 uptr_to_u64(void __user *ptr)
{
	return (__u64)(unsigned long)ptr;
}

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = fw_device_get_by_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);
		return -ENOMEM;
	}

	client->device = device;
	spin_lock_init(&client->lock);
	idr_init(&client->resource_idr);
	INIT_LIST_HEAD(&client->event_list);
	init_waitqueue_head(&client->wait);
	kref_init(&client->kref);

	file->private_data = client;

	mutex_lock(&device->client_list_mutex);
	list_add_tail(&client->link, &device->client_list);
	mutex_unlock(&device->client_list_mutex);

	return 0;
}

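/*
 * Queues an event of up to two data chunks, which dequeue_event() later
 * copies back-to-back into the read(2) buffer.  Events arriving after
 * shutdown has begun are freed immediately.
 */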
static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		kfree(event);
	else
		list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}

static int dequeue_event(struct client *client,
			 char __user *buffer, size_t count)
{
	struct event *event;
	size_t size, total;
	int i, ret;

	ret = wait_event_interruptible(client->wait,
			!list_empty(&client->event_list) ||
			fw_device_is_shutdown(client->device));
	if (ret < 0)
		return ret;

	if (list_empty(&client->event_list) &&
		       fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irq(&client->lock);
	event = list_first_entry(&client->event_list, struct event, link);
	list_del(&event->link);
	spin_unlock_irq(&client->lock);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			ret = -EFAULT;
			goto out;
		}
		total += size;
	}
	ret = total;

 out:
	kfree(event);

	return ret;
}

static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
				 struct client *client)
{
	struct fw_card *card = client->device->card;

	spin_lock_irq(&card->lock);

	event->closure	     = client->bus_reset_closure;
	event->type          = FW_CDEV_EVENT_BUS_RESET;
	event->generation    = client->device->generation;
	event->node_id       = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
	event->irm_node_id   = card->irm_node->node_id;
	event->root_node_id  = card->root_node->node_id;

	spin_unlock_irq(&card->lock);
}

static void for_each_client(struct fw_device *device,
			    void (*callback)(struct client *client))
{
	struct client *c;

	mutex_lock(&device->client_list_mutex);
	list_for_each_entry(c, &device->client_list, link)
		callback(c);
	mutex_unlock(&device->client_list_mutex);
}

static int schedule_reallocations(int id, void *p, void *data)
{
	schedule_if_iso_resource(p);

	return 0;
}

static void queue_bus_reset_event(struct client *client)
{
	struct bus_reset_event *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL) {
		fw_notify("Out of memory when allocating bus reset event\n");
		return;
	}

	fill_bus_reset_event(&e->reset, client);

	queue_event(client, &e->event,
		    &e->reset, sizeof(e->reset), NULL, 0);

	spin_lock_irq(&client->lock);
	idr_for_each(&client->resource_idr, schedule_reallocations, client);
	spin_unlock_irq(&client->lock);
}

void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}

static int ioctl_get_info(struct client *client, void *buffer)
{
	struct fw_cdev_get_info *get_info = buffer;
	struct fw_cdev_event_bus_reset bus_reset;
	unsigned long ret = 0;

	client->version = get_info->version;
	get_info->version = FW_CDEV_VERSION;
	get_info->card = client->device->card->index;

	down_read(&fw_device_rwsem);

	if (get_info->rom != 0) {
		void __user *uptr = u64_to_uptr(get_info->rom);
		size_t want = get_info->rom_length;
		size_t have = client->device->config_rom_length * 4;

		ret = copy_to_user(uptr, client->device->config_rom,
				   min(want, have));
	}
	get_info->rom_length = client->device->config_rom_length * 4;

	up_read(&fw_device_rwsem);

	if (ret != 0)
		return -EFAULT;

	client->bus_reset_closure = get_info->bus_reset_closure;
	if (get_info->bus_reset != 0) {
		void __user *uptr = u64_to_uptr(get_info->bus_reset);

		fill_bus_reset_event(&bus_reset, client);
		if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
			return -EFAULT;
	}

	return 0;
}

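/*
 * Registers a resource in the client's idr and thereby hands out the
 * handle which user space later passes back, e.g. to ioctl_deallocate().
 * The idr entry holds its own client reference.
 */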
static int add_client_resource(struct client *client,
			       struct client_resource *resource, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

 retry:
	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
		return -ENOMEM;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		ret = -ECANCELED;
	else
		ret = idr_get_new(&client->resource_idr, resource,
				  &resource->handle);
	if (ret >= 0) {
		client_get(client);
		schedule_if_iso_resource(resource);
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (ret == -EAGAIN)
		goto retry;

	return ret < 0 ? ret : 0;
}

static int release_client_resource(struct client *client, u32 handle,
				   client_resource_release_fn_t release,
				   struct client_resource **return_resource)
{
	struct client_resource *resource;

	spin_lock_irq(&client->lock);
	if (client->in_shutdown)
		resource = NULL;
	else
		resource = idr_find(&client->resource_idr, handle);
	if (resource && resource->release == release)
		idr_remove(&client->resource_idr, handle);
	spin_unlock_irq(&client->lock);

	if (!(resource && resource->release == release))
		return -EINVAL;

	if (return_resource)
		*return_resource = resource;
	else
		resource->release(client, resource);

	client_put(client);

	return 0;
}

static void release_transaction(struct client *client,
				struct client_resource *resource)
{
	struct outbound_transaction_resource *r = container_of(resource,
			struct outbound_transaction_resource, resource);

	fw_cancel_transaction(client->device->card, &r->transaction);
}

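/*
 * Completion callback of outbound transactions; runs either on normal
 * conclusion of the transaction or after release_transaction() canceled
 * it via fw_cancel_transaction().
 */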
static void complete_transaction(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct outbound_transaction_event *e = data;
	struct fw_cdev_event_response *rsp = &e->response;
	struct client *client = e->client;
	unsigned long flags;

	if (length < rsp->length)
		rsp->length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(rsp->data, payload, rsp->length);

	spin_lock_irqsave(&client->lock, flags);
	/*
	 * 1. If called while in shutdown, the idr tree must be left untouched.
	 *    The idr handle will be removed and the client reference will be
	 *    dropped later.
	 * 2. If the call chain was release_client_resource ->
	 *    release_transaction -> complete_transaction (instead of a normal
	 *    conclusion of the transaction), i.e. if this resource was already
	 *    unregistered from the idr, the client reference will be dropped
	 *    by release_client_resource and we must not drop it here.
	 */
	if (!client->in_shutdown &&
	    idr_find(&client->resource_idr, e->r.resource.handle)) {
		idr_remove(&client->resource_idr, e->r.resource.handle);
		/* Drop the idr's reference */
		client_put(client);
	}
	spin_unlock_irqrestore(&client->lock, flags);

	rsp->type = FW_CDEV_EVENT_RESPONSE;
	rsp->rcode = rcode;

	/*
	 * In the case that sizeof(*rsp) doesn't align with the position of the
	 * data, and the read is short, preserve an extra copy of the data
	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
	 * for short reads and some apps depended on it, this is both safe
	 * and prudent for compatibility.
	 */
	if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
		queue_event(client, &e->event, rsp, sizeof(*rsp),
			    rsp->data, rsp->length);
	else
		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
			    NULL, 0);

	/* Drop the transaction callback's reference */
	client_put(client);
}

static int init_request(struct client *client,
			struct fw_cdev_send_request *request,
			int destination_id, int speed)
{
	struct outbound_transaction_event *e;
	int ret;

	if (request->tcode != TCODE_STREAM_DATA &&
	    (request->length > 4096 || request->length > 512 << speed))
		return -EIO;

	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	e->client = client;
	e->response.length = request->length;
	e->response.closure = request->closure;

	if (request->data &&
	    copy_from_user(e->response.data,
			   u64_to_uptr(request->data), request->length)) {
		ret = -EFAULT;
		goto failed;
	}

	e->r.resource.release = release_transaction;
	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
	if (ret < 0)
		goto failed;

	/* Get a reference for the transaction callback */
	client_get(client);

	fw_send_request(client->device->card, &e->r.transaction,
			request->tcode, destination_id, request->generation,
			speed, request->offset, e->response.data,
			request->length, complete_transaction, e);
	return 0;

 failed:
	kfree(e);

	return ret;
}

static int ioctl_send_request(struct client *client, void *buffer)
{
	struct fw_cdev_send_request *request = buffer;

	switch (request->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_MASK_SWAP:
	case TCODE_LOCK_COMPARE_SWAP:
	case TCODE_LOCK_FETCH_ADD:
	case TCODE_LOCK_LITTLE_ADD:
	case TCODE_LOCK_BOUNDED_ADD:
	case TCODE_LOCK_WRAP_ADD:
	case TCODE_LOCK_VENDOR_DEPENDENT:
		break;
	default:
		return -EINVAL;
	}

	return init_request(client, request, client->device->node_id,
			    client->device->max_speed);
}

static void release_request(struct client *client,
			    struct client_resource *resource)
{
	struct inbound_transaction_resource *r = container_of(resource,
			struct inbound_transaction_resource, resource);

	fw_send_response(client->device->card, r->request,
			 RCODE_CONFLICT_ERROR);
	kfree(r);
}

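/*
 * Inbound request callback for address ranges allocated with
 * ioctl_allocate().  The request is parked as an inbound transaction
 * resource until user space responds through ioctl_send_response().
 */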
static void handle_request(struct fw_card *card, struct fw_request *request,
			   int tcode, int destination, int source,
			   int generation, int speed,
			   unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
{
	struct address_handler_resource *handler = callback_data;
	struct inbound_transaction_resource *r;
	struct inbound_transaction_event *e;
	int ret;

	r = kmalloc(sizeof(*r), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (r == NULL || e == NULL)
		goto failed;

	r->request = request;
	r->data    = payload;
	r->length  = length;

	r->resource.release = release_request;
	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
	if (ret < 0)
		goto failed;

	e->request.type    = FW_CDEV_EVENT_REQUEST;
	e->request.tcode   = tcode;
	e->request.offset  = offset;
	e->request.length  = length;
	e->request.handle  = r->resource.handle;
	e->request.closure = handler->closure;

	queue_event(handler->client, &e->event,
		    &e->request, sizeof(e->request), payload, length);
	return;

 failed:
	kfree(r);
	kfree(e);
	fw_send_response(card, request, RCODE_CONFLICT_ERROR);
}

static void release_address_handler(struct client *client,
				    struct client_resource *resource)
{
	struct address_handler_resource *r =
	    container_of(resource, struct address_handler_resource, resource);

	fw_core_remove_address_handler(&r->handler);
	kfree(r);
}

static int ioctl_allocate(struct client *client, void *buffer)
{
	struct fw_cdev_allocate *request = buffer;
	struct address_handler_resource *r;
	struct fw_address_region region;
	int ret;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	region.start = request->offset;
	region.end = request->offset + request->length;
	r->handler.length = request->length;
	r->handler.address_callback = handle_request;
	r->handler.callback_data = r;
	r->closure = request->closure;
	r->client = client;

	ret = fw_core_add_address_handler(&r->handler, &region);
	if (ret < 0) {
		kfree(r);
		return ret;
	}

	r->resource.release = release_address_handler;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &r->resource);
		return ret;
	}
	request->handle = r->resource.handle;

	return 0;
}

static int ioctl_deallocate(struct client *client, void *buffer)
{
	struct fw_cdev_deallocate *request = buffer;

	return release_client_resource(client, request->handle,
				       release_address_handler, NULL);
}

static int ioctl_send_response(struct client *client, void *buffer)
{
	struct fw_cdev_send_response *request = buffer;
	struct client_resource *resource;
	struct inbound_transaction_resource *r;
	int ret = 0;

	if (release_client_resource(client, request->handle,
				    release_request, &resource) < 0)
		return -EINVAL;

	r = container_of(resource, struct inbound_transaction_resource,
			 resource);
	if (request->length < r->length)
		r->length = request->length;

	if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) {
		ret = -EFAULT;
		goto out;
	}

	fw_send_response(client->device->card, r->request, request->rcode);
 out:
	kfree(r);

	return ret;
}

static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
{
	struct fw_cdev_initiate_bus_reset *request = buffer;
	int short_reset;

	short_reset = (request->type == FW_CDEV_SHORT_RESET);

	return fw_core_initiate_bus_reset(client->device->card, short_reset);
}

static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor_resource *r =
		container_of(resource, struct descriptor_resource, resource);

	fw_core_remove_descriptor(&r->descriptor);
	kfree(r);
}

static int ioctl_add_descriptor(struct client *client, void *buffer)
{
	struct fw_cdev_add_descriptor *request = buffer;
	struct descriptor_resource *r;
	int ret;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	if (request->length > 256)
		return -EINVAL;

	r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	if (copy_from_user(r->data,
			   u64_to_uptr(request->data), request->length * 4)) {
		ret = -EFAULT;
		goto failed;
	}

	r->descriptor.length    = request->length;
	r->descriptor.immediate = request->immediate;
	r->descriptor.key       = request->key;
	r->descriptor.data      = r->data;

	ret = fw_core_add_descriptor(&r->descriptor);
	if (ret < 0)
		goto failed;

	r->resource.release = release_descriptor;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&r->descriptor);
		goto failed;
	}
	request->handle = r->resource.handle;

	return 0;
 failed:
	kfree(r);

	return ret;
}

static int ioctl_remove_descriptor(struct client *client, void *buffer)
{
	struct fw_cdev_remove_descriptor *request = buffer;

	return release_client_resource(client, request->handle,
				       release_descriptor, NULL);
}

static void iso_callback(struct fw_iso_context *context, u32 cycle,
			 size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt_event *e;

	e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC);
	if (e == NULL)
		return;

	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
	e->interrupt.closure   = client->iso_closure;
	e->interrupt.cycle     = cycle;
	e->interrupt.header_length = header_length;
	memcpy(e->interrupt.header, header, header_length);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt) + header_length, NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, void *buffer)
{
	struct fw_cdev_create_iso_context *request = buffer;
	struct fw_iso_context *context;

	/* We only support one context at this time. */
	if (client->iso_context != NULL)
		return -EBUSY;

	if (request->channel > 63)
		return -EINVAL;

	switch (request->type) {
	case FW_ISO_CONTEXT_RECEIVE:
		if (request->header_size < 4 || (request->header_size & 3))
			return -EINVAL;

		break;

	case FW_ISO_CONTEXT_TRANSMIT:
		if (request->speed > SCODE_3200)
			return -EINVAL;

		break;

	default:
		return -EINVAL;
	}

	context =  fw_iso_context_create(client->device->card,
					 request->type,
					 request->channel,
					 request->speed,
					 request->header_size,
					 iso_callback, client);
	if (IS_ERR(context))
		return PTR_ERR(context);

	client->iso_closure = request->closure;
	client->iso_context = context;

	/* We only support one context at this time. */
	request->handle = 0;

	return 0;
}

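/*
 * The control word of struct fw_cdev_iso_packet packs, from bit 0 up:
 * payload_length (16 bits), interrupt (1), skip (1), tag (2), sy (4),
 * and header_length (8).
 */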
/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)

static int ioctl_queue_iso(struct client *client, void *buffer)
{
	struct fw_cdev_queue_iso *request = buffer;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, header_length;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL || request->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we set up the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the request->data pointer is ignored.
	 */

	payload = (unsigned long)request->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (request->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);

	if (!access_ok(VERIFY_READ, p, request->size))
		return -EFAULT;

	end = (void __user *)p + request->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
			header_length = u.packet.header_length;
		} else {
			/*
			 * We require that header_length is a multiple of
			 * the fixed header size, ctx->header_size.
			 */
			if (ctx->header_size == 0) {
				if (u.packet.header_length > 0)
					return -EINVAL;
			} else if (u.packet.header_length % ctx->header_size != 0) {
				return -EINVAL;
			}
			header_length = 0;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[header_length / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
		    (u.packet.header, p->header, header_length))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}

	request->size    -= uptr_to_u64(p) - request->packets;
	request->packets  = uptr_to_u64(p);
	request->data     = client->vm_start + payload;

	return count;
}

static int ioctl_start_iso(struct client *client, void *buffer)
{
	struct fw_cdev_start_iso *request = buffer;

	if (client->iso_context == NULL || request->handle != 0)
		return -EINVAL;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
		if (request->tags == 0 || request->tags > 15)
			return -EINVAL;

		if (request->sync > 15)
			return -EINVAL;
	}

	return fw_iso_context_start(client->iso_context, request->cycle,
				    request->sync, request->tags);
}

static int ioctl_stop_iso(struct client *client, void *buffer)
{
	struct fw_cdev_stop_iso *request = buffer;

	if (client->iso_context == NULL || request->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}

static int ioctl_get_cycle_timer(struct client *client, void *buffer)
{
	struct fw_cdev_get_cycle_timer *request = buffer;
	struct fw_card *card = client->device->card;
	unsigned long long bus_time;
	struct timeval tv;
	unsigned long flags;

	preempt_disable();
	local_irq_save(flags);

	bus_time = card->driver->get_bus_time(card);
	do_gettimeofday(&tv);

	local_irq_restore(flags);
	preempt_enable();

	request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
	request->cycle_timer = bus_time & 0xffffffff;
	return 0;
}

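/*
 * State machine for isochronous resource (re)allocation:  ISO_RES_ALLOC
 * moves to ISO_RES_REALLOC once the initial allocation succeeded, so
 * that the resource is reallocated after every bus reset; ISO_RES_DEALLOC
 * and the *_ONCE variants free the iso_resource when their work is done.
 */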
static void iso_resource_work(struct work_struct *work)
{
	struct iso_resource_event *e;
	struct iso_resource *r =
			container_of(work, struct iso_resource, work.work);
	struct client *client = r->client;
	int generation, channel, bandwidth, todo;
	bool skip, free, success;

	spin_lock_irq(&client->lock);
	generation = client->device->generation;
	todo = r->todo;
	/* Allow 1000ms grace period for other reallocations. */
	if (todo == ISO_RES_ALLOC &&
	    time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
		schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
		skip = true;
	} else {
		/* We could be called twice within the same generation. */
		skip = todo == ISO_RES_REALLOC &&
		       r->generation == generation;
	}
	free = todo == ISO_RES_DEALLOC ||
	       todo == ISO_RES_ALLOC_ONCE ||
	       todo == ISO_RES_DEALLOC_ONCE;
	r->generation = generation;
	spin_unlock_irq(&client->lock);

	if (skip)
		goto out;

	bandwidth = r->bandwidth;

	fw_iso_resource_manage(client->device->card, generation,
			r->channels, &channel, &bandwidth,
			todo == ISO_RES_ALLOC ||
			todo == ISO_RES_REALLOC ||
			todo == ISO_RES_ALLOC_ONCE,
			r->transaction_data);
	/*
	 * Is this generation outdated already?  As long as this resource sticks
	 * in the idr, it will be scheduled again for a newer generation or at
	 * shutdown.
	 */
	if (channel == -EAGAIN &&
	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
		goto out;

	success = channel >= 0 || bandwidth > 0;

	spin_lock_irq(&client->lock);
	/*
	 * Transit from allocation to reallocation, except if the client
	 * requested deallocation in the meantime.
	 */
	if (r->todo == ISO_RES_ALLOC)
		r->todo = ISO_RES_REALLOC;
	/*
	 * Allocation or reallocation failure?  Pull this resource out of the
	 * idr and prepare for deletion, unless the client is shutting down.
	 */
	if (r->todo == ISO_RES_REALLOC && !success &&
	    !client->in_shutdown &&
	    idr_find(&client->resource_idr, r->resource.handle)) {
		idr_remove(&client->resource_idr, r->resource.handle);
		client_put(client);
		free = true;
	}
	spin_unlock_irq(&client->lock);

	if (todo == ISO_RES_ALLOC && channel >= 0)
		r->channels = 1ULL << channel;

	if (todo == ISO_RES_REALLOC && success)
		goto out;

	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
		e = r->e_alloc;
		r->e_alloc = NULL;
	} else {
		e = r->e_dealloc;
		r->e_dealloc = NULL;
	}
	e->iso_resource.handle    = r->resource.handle;
	e->iso_resource.channel   = channel;
	e->iso_resource.bandwidth = bandwidth;

	queue_event(client, &e->event,
		    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);

	if (free) {
		cancel_delayed_work(&r->work);
		kfree(r->e_alloc);
		kfree(r->e_dealloc);
		kfree(r);
	}
 out:
	client_put(client);
}

static void release_iso_resource(struct client *client,
				 struct client_resource *resource)
{
	struct iso_resource *r =
		container_of(resource, struct iso_resource, resource);

	spin_lock_irq(&client->lock);
	r->todo = ISO_RES_DEALLOC;
	schedule_iso_resource(r, 0);
	spin_unlock_irq(&client->lock);
}

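/*
 * Prepares an iso_resource together with the pair of events which
 * iso_resource_work() delivers on allocation and on deallocation.
 */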
static int init_iso_resource(struct client *client,
		struct fw_cdev_allocate_iso_resource *request, int todo)
{
	struct iso_resource_event *e1, *e2;
	struct iso_resource *r;
	int ret;

	if ((request->channels == 0 && request->bandwidth == 0) ||
	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
	    request->bandwidth < 0)
		return -EINVAL;

	r  = kmalloc(sizeof(*r), GFP_KERNEL);
	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
	if (r == NULL || e1 == NULL || e2 == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_DELAYED_WORK(&r->work, iso_resource_work);
	r->client	= client;
	r->todo		= todo;
	r->generation	= -1;
	r->channels	= request->channels;
	r->bandwidth	= request->bandwidth;
	r->e_alloc	= e1;
	r->e_dealloc	= e2;

	e1->iso_resource.closure = request->closure;
	e1->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
	e2->iso_resource.closure = request->closure;
	e2->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

	if (todo == ISO_RES_ALLOC) {
		r->resource.release = release_iso_resource;
		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
		if (ret < 0)
			goto fail;
	} else {
		r->resource.release = NULL;
		r->resource.handle = -1;
		schedule_iso_resource(r, 0);
	}
	request->handle = r->resource.handle;

	return 0;
 fail:
	kfree(r);
	kfree(e1);
	kfree(e2);

	return ret;
}

static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
{
	struct fw_cdev_allocate_iso_resource *request = buffer;

	return init_iso_resource(client, request, ISO_RES_ALLOC);
}

static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
{
	struct fw_cdev_deallocate *request = buffer;

	return release_client_resource(client, request->handle,
				       release_iso_resource, NULL);
}

static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer)
{
	struct fw_cdev_allocate_iso_resource *request = buffer;

	return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
}

static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer)
{
	struct fw_cdev_allocate_iso_resource *request = buffer;

	return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
}

/*
 * Returns a speed code:  Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, void *buffer)
{
	return client->device->max_speed;
}

static int ioctl_send_broadcast_request(struct client *client, void *buffer)
{
	struct fw_cdev_send_request *request = buffer;

	switch (request->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		break;
	default:
		return -EINVAL;
	}

	/* Security policy: Only allow accesses to Units Space. */
	if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
		return -EACCES;

	return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100);
}

static int ioctl_send_stream_packet(struct client *client, void *buffer)
{
	struct fw_cdev_send_stream_packet *p = buffer;
	struct fw_cdev_send_request request;
	int dest;

	if (p->speed > client->device->card->link_speed ||
	    p->length > 1024 << p->speed)
		return -EIO;

	if (p->tag > 3 || p->channel > 63 || p->sy > 15)
		return -EINVAL;

	dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy);
	request.tcode		= TCODE_STREAM_DATA;
	request.length		= p->length;
	request.closure		= p->closure;
	request.data		= p->data;
	request.generation	= p->generation;

	return init_request(client, &request, dest, p->speed);
}

static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
	ioctl_get_info,
	ioctl_send_request,
	ioctl_allocate,
	ioctl_deallocate,
	ioctl_send_response,
	ioctl_initiate_bus_reset,
	ioctl_add_descriptor,
	ioctl_remove_descriptor,
	ioctl_create_iso_context,
	ioctl_queue_iso,
	ioctl_start_iso,
	ioctl_stop_iso,
	ioctl_get_cycle_timer,
	ioctl_allocate_iso_resource,
	ioctl_deallocate_iso_resource,
	ioctl_allocate_iso_resource_once,
	ioctl_deallocate_iso_resource_once,
	ioctl_get_speed,
	ioctl_send_broadcast_request,
	ioctl_send_stream_packet,
};

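/*
 * Every ioctl argument is copied into a stack buffer sized for the
 * largest request structure, handed to the handler selected by
 * _IOC_NR(cmd), and copied back to user space if the command has the
 * _IOC_READ direction bit set.  Nonnegative handler return values are
 * passed through as the ioctl result.
 */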
static int dispatch_ioctl(struct client *client,
			  unsigned int cmd, void __user *arg)
{
	char buffer[sizeof(union {
		struct fw_cdev_get_info			_00;
		struct fw_cdev_send_request		_01;
		struct fw_cdev_allocate			_02;
		struct fw_cdev_deallocate		_03;
		struct fw_cdev_send_response		_04;
		struct fw_cdev_initiate_bus_reset	_05;
		struct fw_cdev_add_descriptor		_06;
		struct fw_cdev_remove_descriptor	_07;
		struct fw_cdev_create_iso_context	_08;
		struct fw_cdev_queue_iso		_09;
		struct fw_cdev_start_iso		_0a;
		struct fw_cdev_stop_iso			_0b;
		struct fw_cdev_get_cycle_timer		_0c;
		struct fw_cdev_allocate_iso_resource	_0d;
		struct fw_cdev_send_stream_packet	_13;
	})];
	int ret;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
		return -EINVAL;

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	return ret;
}

static long fw_device_op_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	return dispatch_ioctl(client, cmd, (void __user *) arg);
}

#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
				      unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif

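/*
 * mmap() of the device file provides the one isochronous buffer against
 * which ioctl_queue_iso() resolves packet payload pointers.  VM_WRITE
 * mappings map the buffer DMA_TO_DEVICE (transmit), all others
 * DMA_FROM_DEVICE (receive).
 */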
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
				 page_count, direction);
	if (ret < 0)
		return ret;

	ret = fw_iso_buffer_map(&client->buffer, vma);
	if (ret < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return ret;
}

static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;
	struct client *client = data;

	resource->release(client, resource);
	client_put(client);

	return 0;
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *event, *next_event;

	mutex_lock(&client->device->client_list_mutex);
	list_del(&client->link);
	mutex_unlock(&client->device->client_list_mutex);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irq(&client->lock);
	client->in_shutdown = true;
	spin_unlock_irq(&client->lock);

	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_remove_all(&client->resource_idr);
	idr_destroy(&client->resource_idr);

	list_for_each_entry_safe(event, next_event, &client->event_list, link)
		kfree(event);

	client_put(client);

	return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
{
	struct client *client = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= POLLHUP | POLLERR;
	if (!list_empty(&client->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.poll		= fw_device_op_poll,
	.release	= fw_device_op_release,
	.mmap		= fw_device_op_mmap,

#ifdef CONFIG_COMPAT
	.compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};