/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "core.h"

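/*
 * Per-open-file state.  A client is created in fw_device_op_open() and
 * freed through its kref once the file has been released and all
 * outstanding resources and transaction callbacks have dropped their
 * references.
 */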
struct client {
	u32 version;
	struct fw_device *device;

	spinlock_t lock;
	bool in_shutdown;
	struct idr resource_idr;
	struct list_head event_list;
	wait_queue_head_t wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;

	struct list_head link;
	struct kref kref;
};

static inline void client_get(struct client *client)
{
	kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
	struct client *client = container_of(kref, struct client, kref);

	fw_device_put(client->device);
	kfree(client);
}

static void client_put(struct client *client)
{
	kref_put(&client->kref, client_release);
}

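/*
 * A client reference is held by the open file itself (dropped in
 * fw_device_op_release), by each resource registered in resource_idr
 * (see add_client_resource), by each in-flight transaction callback
 * (see init_request), and by each scheduled iso_resource work item
 * (see schedule_iso_resource).
 */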
struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);
struct client_resource {
	client_resource_release_fn_t release;
	int handle;
};

struct address_handler_resource {
	struct client_resource resource;
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
};

struct outbound_transaction_resource {
	struct client_resource resource;
	struct fw_transaction transaction;
};

struct inbound_transaction_resource {
	struct client_resource resource;
	struct fw_request *request;
	void *data;
	size_t length;
};

struct descriptor_resource {
	struct client_resource resource;
	struct fw_descriptor descriptor;
	u32 data[0];
};

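/*
 * An iso_resource represents one channel/bandwidth allocation at the
 * isochronous resource manager.  Its todo field acts as a small state
 * machine: ISO_RES_ALLOC and ISO_RES_REALLOC keep the allocation alive
 * across bus resets, ISO_RES_DEALLOC gives it back, and the *_ONCE
 * variants perform a single (de)allocation without registering the
 * resource in the idr.
 */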
struct iso_resource {
	struct client_resource resource;
	struct client *client;
	/* Schedule work and access todo only with client->lock held. */
	struct delayed_work work;
	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
	int generation;
	u64 channels;
	s32 bandwidth;
	struct iso_resource_event *e_alloc, *e_dealloc;
};

static void schedule_iso_resource(struct iso_resource *);
static void release_iso_resource(struct client *, struct client_resource *);

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct bus_reset_event {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
	struct event event;
	struct client *client;
	struct outbound_transaction_resource r;
	struct fw_cdev_event_response response;
};

struct inbound_transaction_event {
	struct event event;
	struct fw_cdev_event_request request;
};

struct iso_interrupt_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_resource_event {
	struct event event;
	struct fw_cdev_event_iso_resource resource;
};

static inline void __user *u64_to_uptr(__u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline __u64 uptr_to_u64(void __user *ptr)
{
	return (__u64)(unsigned long)ptr;
}

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = fw_device_get_by_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);
		return -ENOMEM;
	}

	client->device = device;
	spin_lock_init(&client->lock);
	idr_init(&client->resource_idr);
	INIT_LIST_HEAD(&client->event_list);
	init_waitqueue_head(&client->wait);
	kref_init(&client->kref);

	file->private_data = client;

	mutex_lock(&device->client_list_mutex);
	list_add_tail(&client->link, &device->client_list);
	mutex_unlock(&device->client_list_mutex);

	return 0;
}

static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		kfree(event);
	else
		list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}

static int dequeue_event(struct client *client,
			 char __user *buffer, size_t count)
{
	struct event *event;
	size_t size, total;
	int i, ret;

	ret = wait_event_interruptible(client->wait,
			!list_empty(&client->event_list) ||
			fw_device_is_shutdown(client->device));
	if (ret < 0)
		return ret;

	if (list_empty(&client->event_list) &&
		       fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irq(&client->lock);
	event = list_first_entry(&client->event_list, struct event, link);
	list_del(&event->link);
	spin_unlock_irq(&client->lock);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			ret = -EFAULT;
			goto out;
		}
		total += size;
	}
	ret = total;

 out:
	kfree(event);

	return ret;
}

static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

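/*
 * A minimal userspace sketch of the resulting open/ioctl/read flow
 * (hypothetical example; the ABI is declared in <linux/firewire-cdev.h>):
 *
 *	int fd = open("/dev/fw0", O_RDWR);
 *	struct fw_cdev_get_info info = { .version = FW_CDEV_VERSION };
 *	ioctl(fd, FW_CDEV_IOC_GET_INFO, &info);
 *	read(fd, event_buffer, sizeof(event_buffer));  // blocks until an event is queued
 */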
static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
				 struct client *client)
{
	struct fw_card *card = client->device->card;

	spin_lock_irq(&card->lock);

	event->closure	     = client->bus_reset_closure;
	event->type          = FW_CDEV_EVENT_BUS_RESET;
	event->generation    = client->device->generation;
	event->node_id       = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
	event->irm_node_id   = card->irm_node->node_id;
	event->root_node_id  = card->root_node->node_id;

	spin_unlock_irq(&card->lock);
}

static void for_each_client(struct fw_device *device,
			    void (*callback)(struct client *client))
{
	struct client *c;

	mutex_lock(&device->client_list_mutex);
	list_for_each_entry(c, &device->client_list, link)
		callback(c);
	mutex_unlock(&device->client_list_mutex);
}

static int schedule_reallocations(int id, void *p, void *data)
{
	struct client_resource *r = p;

	if (r->release == release_iso_resource)
		schedule_iso_resource(container_of(r,
					struct iso_resource, resource));
	return 0;
}

static void queue_bus_reset_event(struct client *client)
{
	struct bus_reset_event *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL) {
		fw_notify("Out of memory when allocating bus reset event\n");
		return;
	}

	fill_bus_reset_event(&e->reset, client);

	queue_event(client, &e->event,
		    &e->reset, sizeof(e->reset), NULL, 0);

	spin_lock_irq(&client->lock);
	idr_for_each(&client->resource_idr, schedule_reallocations, client);
	spin_unlock_irq(&client->lock);
}

void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}

static int ioctl_get_info(struct client *client, void *buffer)
{
	struct fw_cdev_get_info *get_info = buffer;
	struct fw_cdev_event_bus_reset bus_reset;
	unsigned long ret = 0;

	client->version = get_info->version;
	get_info->version = FW_CDEV_VERSION;
	get_info->card = client->device->card->index;

	down_read(&fw_device_rwsem);

	if (get_info->rom != 0) {
		void __user *uptr = u64_to_uptr(get_info->rom);
		size_t want = get_info->rom_length;
		size_t have = client->device->config_rom_length * 4;

		ret = copy_to_user(uptr, client->device->config_rom,
				   min(want, have));
	}
	get_info->rom_length = client->device->config_rom_length * 4;

	up_read(&fw_device_rwsem);

	if (ret != 0)
		return -EFAULT;

	client->bus_reset_closure = get_info->bus_reset_closure;
	if (get_info->bus_reset != 0) {
		void __user *uptr = u64_to_uptr(get_info->bus_reset);

		fill_bus_reset_event(&bus_reset, client);
		if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
			return -EFAULT;
	}

	return 0;
}

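/*
 * Register a resource in the client's idr and hand its handle back to
 * userspace.  idr_pre_get()/idr_get_new() can race with concurrent
 * allocations, so -EAGAIN from idr_get_new() sends us back to the
 * preallocation step.
 */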
static int add_client_resource(struct client *client,
			       struct client_resource *resource, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

 retry:
	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
		return -ENOMEM;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		ret = -ECANCELED;
	else
		ret = idr_get_new(&client->resource_idr, resource,
				  &resource->handle);
	if (ret >= 0) {
		client_get(client);
		if (resource->release == release_iso_resource)
			schedule_iso_resource(container_of(resource,
						struct iso_resource, resource));
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (ret == -EAGAIN)
		goto retry;

	return ret < 0 ? ret : 0;
}

static int release_client_resource(struct client *client, u32 handle,
				   client_resource_release_fn_t release,
				   struct client_resource **resource)
{
	struct client_resource *r;

	spin_lock_irq(&client->lock);
	if (client->in_shutdown)
		r = NULL;
	else
		r = idr_find(&client->resource_idr, handle);
	if (r && r->release == release)
		idr_remove(&client->resource_idr, handle);
	spin_unlock_irq(&client->lock);

	if (!(r && r->release == release))
		return -EINVAL;

	if (resource)
		*resource = r;
	else
		r->release(client, r);

	client_put(client);

	return 0;
}

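/*
 * Forced release of an outbound transaction.  Cancelling it makes the
 * core run complete_transaction() with an error rcode, so the usual
 * completion path still queues the response event and drops the
 * callback's client reference.
 */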
static void release_transaction(struct client *client,
				struct client_resource *resource)
{
	struct outbound_transaction_resource *r = container_of(resource,
			struct outbound_transaction_resource, resource);

	fw_cancel_transaction(client->device->card, &r->transaction);
}

static void complete_transaction(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct outbound_transaction_event *e = data;
	struct fw_cdev_event_response *rsp = &e->response;
	struct client *client = e->client;
	unsigned long flags;

	if (length < rsp->length)
		rsp->length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(rsp->data, payload, rsp->length);

	spin_lock_irqsave(&client->lock, flags);
	/*
	 * 1. If called while in shutdown, the idr tree must be left untouched.
	 *    The idr handle will be removed and the client reference will be
	 *    dropped later.
	 * 2. If the call chain was release_client_resource ->
	 *    release_transaction -> complete_transaction (instead of a normal
	 *    conclusion of the transaction), i.e. if this resource was already
	 *    unregistered from the idr, the client reference will be dropped
	 *    by release_client_resource and we must not drop it here.
	 */
	if (!client->in_shutdown &&
	    idr_find(&client->resource_idr, e->r.resource.handle)) {
		idr_remove(&client->resource_idr, e->r.resource.handle);
		/* Drop the idr's reference */
		client_put(client);
	}
	spin_unlock_irqrestore(&client->lock, flags);

	rsp->type = FW_CDEV_EVENT_RESPONSE;
	rsp->rcode = rcode;

	/*
	 * In the case that sizeof(*rsp) doesn't align with the position of the
	 * data, and the read is short, preserve an extra copy of the data
	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
	 * for short reads and some apps depended on it, this is both safe
	 * and prudent for compatibility.
	 */
	if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
		queue_event(client, &e->event, rsp, sizeof(*rsp),
			    rsp->data, rsp->length);
	else
		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
			    NULL, 0);

	/* Drop the transaction callback's reference */
	client_put(client);
}

static int init_request(struct client *client,
			struct fw_cdev_send_request *request,
			int destination_id, int speed)
{
	struct outbound_transaction_event *e;
	int ret;

	if (request->tcode != TCODE_STREAM_DATA &&
	    (request->length > 4096 || request->length > 512 << speed))
		return -EIO;

	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	e->client = client;
	e->response.length = request->length;
	e->response.closure = request->closure;

	if (request->data &&
	    copy_from_user(e->response.data,
			   u64_to_uptr(request->data), request->length)) {
		ret = -EFAULT;
		goto failed;
	}

	e->r.resource.release = release_transaction;
	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
	if (ret < 0)
		goto failed;

	/* Get a reference for the transaction callback */
	client_get(client);

	fw_send_request(client->device->card, &e->r.transaction,
			request->tcode, destination_id, request->generation,
			speed, request->offset, e->response.data,
			request->length, complete_transaction, e);
	return 0;

 failed:
	kfree(e);

	return ret;
}

static int ioctl_send_request(struct client *client, void *buffer)
{
	struct fw_cdev_send_request *request = buffer;

	switch (request->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_MASK_SWAP:
	case TCODE_LOCK_COMPARE_SWAP:
	case TCODE_LOCK_FETCH_ADD:
	case TCODE_LOCK_LITTLE_ADD:
	case TCODE_LOCK_BOUNDED_ADD:
	case TCODE_LOCK_WRAP_ADD:
	case TCODE_LOCK_VENDOR_DEPENDENT:
		break;
	default:
		return -EINVAL;
	}

	return init_request(client, request, client->device->node_id,
			    client->device->max_speed);
}

static void release_request(struct client *client,
			    struct client_resource *resource)
{
	struct inbound_transaction_resource *r = container_of(resource,
			struct inbound_transaction_resource, resource);

	fw_send_response(client->device->card, r->request,
			 RCODE_CONFLICT_ERROR);
	kfree(r);
}

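/*
 * Called in atomic context whenever a request hits one of this client's
 * registered address ranges.  The request is parked in the idr and
 * forwarded to userspace, which must answer it via ioctl_send_response()
 * using the handle carried in the event.
 */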
static void handle_request(struct fw_card *card, struct fw_request *request,
			   int tcode, int destination, int source,
			   int generation, int speed,
			   unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
{
	struct address_handler_resource *handler = callback_data;
	struct inbound_transaction_resource *r;
	struct inbound_transaction_event *e;
	int ret;

	r = kmalloc(sizeof(*r), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (r == NULL || e == NULL)
		goto failed;

	r->request = request;
	r->data    = payload;
	r->length  = length;

	r->resource.release = release_request;
	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
	if (ret < 0)
		goto failed;

	e->request.type    = FW_CDEV_EVENT_REQUEST;
	e->request.tcode   = tcode;
	e->request.offset  = offset;
	e->request.length  = length;
	e->request.handle  = r->resource.handle;
	e->request.closure = handler->closure;

	queue_event(handler->client, &e->event,
		    &e->request, sizeof(e->request), payload, length);
	return;

 failed:
	kfree(r);
	kfree(e);
	fw_send_response(card, request, RCODE_CONFLICT_ERROR);
}

static void release_address_handler(struct client *client,
				    struct client_resource *resource)
{
	struct address_handler_resource *r =
	    container_of(resource, struct address_handler_resource, resource);

	fw_core_remove_address_handler(&r->handler);
	kfree(r);
}

static int ioctl_allocate(struct client *client, void *buffer)
{
	struct fw_cdev_allocate *request = buffer;
	struct address_handler_resource *r;
	struct fw_address_region region;
	int ret;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	region.start = request->offset;
	region.end = request->offset + request->length;
	r->handler.length = request->length;
	r->handler.address_callback = handle_request;
	r->handler.callback_data = r;
	r->closure = request->closure;
	r->client = client;

	ret = fw_core_add_address_handler(&r->handler, &region);
	if (ret < 0) {
		kfree(r);
		return ret;
	}

	r->resource.release = release_address_handler;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &r->resource);
		return ret;
	}
	request->handle = r->resource.handle;

	return 0;
}

static int ioctl_deallocate(struct client *client, void *buffer)
{
	struct fw_cdev_deallocate *request = buffer;

	return release_client_resource(client, request->handle,
				       release_address_handler, NULL);
}

static int ioctl_send_response(struct client *client, void *buffer)
{
	struct fw_cdev_send_response *request = buffer;
	struct client_resource *resource;
	struct inbound_transaction_resource *r;

	if (release_client_resource(client, request->handle,
				    release_request, &resource) < 0)
		return -EINVAL;

	r = container_of(resource, struct inbound_transaction_resource,
			 resource);
	if (request->length < r->length)
		r->length = request->length;
	if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
		return -EFAULT;

	fw_send_response(client->device->card, r->request, request->rcode);
	kfree(r);

	return 0;
}

static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
{
	struct fw_cdev_initiate_bus_reset *request = buffer;
	int short_reset;

	short_reset = (request->type == FW_CDEV_SHORT_RESET);

	return fw_core_initiate_bus_reset(client->device->card, short_reset);
}

static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor_resource *r =
		container_of(resource, struct descriptor_resource, resource);

	fw_core_remove_descriptor(&r->descriptor);
	kfree(r);
}

static int ioctl_add_descriptor(struct client *client, void *buffer)
{
	struct fw_cdev_add_descriptor *request = buffer;
	struct descriptor_resource *r;
	int ret;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	if (request->length > 256)
		return -EINVAL;

	r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	if (copy_from_user(r->data,
			   u64_to_uptr(request->data), request->length * 4)) {
		ret = -EFAULT;
		goto failed;
	}

	r->descriptor.length    = request->length;
	r->descriptor.immediate = request->immediate;
	r->descriptor.key       = request->key;
	r->descriptor.data      = r->data;

	ret = fw_core_add_descriptor(&r->descriptor);
	if (ret < 0)
		goto failed;

	r->resource.release = release_descriptor;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&r->descriptor);
		goto failed;
	}
	request->handle = r->resource.handle;

	return 0;
 failed:
	kfree(r);

	return ret;
}

static int ioctl_remove_descriptor(struct client *client, void *buffer)
{
	struct fw_cdev_remove_descriptor *request = buffer;

	return release_client_resource(client, request->handle,
				       release_descriptor, NULL);
}

static void iso_callback(struct fw_iso_context *context, u32 cycle,
			 size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt_event *e;

	e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC);
	if (e == NULL)
		return;

	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
	e->interrupt.closure   = client->iso_closure;
	e->interrupt.cycle     = cycle;
	e->interrupt.header_length = header_length;
	memcpy(e->interrupt.header, header, header_length);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt) + header_length, NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, void *buffer)
{
	struct fw_cdev_create_iso_context *request = buffer;
	struct fw_iso_context *context;

	/* We only support one context at this time. */
	if (client->iso_context != NULL)
		return -EBUSY;

	if (request->channel > 63)
		return -EINVAL;

	switch (request->type) {
	case FW_ISO_CONTEXT_RECEIVE:
		if (request->header_size < 4 || (request->header_size & 3))
			return -EINVAL;

		break;

	case FW_ISO_CONTEXT_TRANSMIT:
		if (request->speed > SCODE_3200)
			return -EINVAL;

		break;

	default:
		return -EINVAL;
	}

	context = fw_iso_context_create(client->device->card,
					request->type,
					request->channel,
					request->speed,
					request->header_size,
					iso_callback, client);
	if (IS_ERR(context))
		return PTR_ERR(context);

	client->iso_closure = request->closure;
	client->iso_context = context;

	/* We only support one context at this time. */
	request->handle = 0;

	return 0;
}

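/*
 * Layout of the control word, from bit 0 up: payload_length (16 bits),
 * interrupt (1), skip (1), tag (2), sy (4), header_length (8).
 */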
/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)

static int ioctl_queue_iso(struct client *client, void *buffer)
{
	struct fw_cdev_queue_iso *request = buffer;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, header_length;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL || request->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we set up the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the request->data pointer is ignored.
	 */

	payload = (unsigned long)request->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (request->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);

	if (!access_ok(VERIFY_READ, p, request->size))
		return -EFAULT;

	end = (void __user *)p + request->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
			header_length = u.packet.header_length;
		} else {
			/*
			 * We require that header_length is a multiple of
			 * the fixed header size, ctx->header_size.
			 */
			if (ctx->header_size == 0) {
				if (u.packet.header_length > 0)
					return -EINVAL;
			} else if (u.packet.header_length % ctx->header_size != 0) {
				return -EINVAL;
			}
			header_length = 0;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[header_length / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
		    (u.packet.header, p->header, header_length))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}

	request->size    -= uptr_to_u64(p) - request->packets;
	request->packets  = uptr_to_u64(p);
	request->data     = client->vm_start + payload;

	return count;
}

static int ioctl_start_iso(struct client *client, void *buffer)
{
	struct fw_cdev_start_iso *request = buffer;

	if (client->iso_context == NULL || request->handle != 0)
		return -EINVAL;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
		if (request->tags == 0 || request->tags > 15)
			return -EINVAL;

		if (request->sync > 15)
			return -EINVAL;
	}

	return fw_iso_context_start(client->iso_context, request->cycle,
				    request->sync, request->tags);
}

static int ioctl_stop_iso(struct client *client, void *buffer)
{
	struct fw_cdev_stop_iso *request = buffer;

	if (client->iso_context == NULL || request->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}

static int ioctl_get_cycle_timer(struct client *client, void *buffer)
{
	struct fw_cdev_get_cycle_timer *request = buffer;
	struct fw_card *card = client->device->card;
	unsigned long long bus_time;
	struct timeval tv;
	unsigned long flags;

	preempt_disable();
	local_irq_save(flags);

	bus_time = card->driver->get_bus_time(card);
	do_gettimeofday(&tv);

	local_irq_restore(flags);
	preempt_enable();

	request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
	request->cycle_timer = bus_time & 0xffffffff;
	return 0;
}

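/*
 * Worker for all iso_resource state transitions.  After a bus reset it
 * waits out a short grace period so that other reallocations can settle,
 * then calls fw_iso_resource_manage() and reports the outcome to
 * userspace as an allocated or deallocated event.
 */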
static void iso_resource_work(struct work_struct *work)
{
	struct iso_resource_event *e;
	struct iso_resource *r =
			container_of(work, struct iso_resource, work.work);
	struct client *client = r->client;
	int generation, channel, bandwidth, todo;
	bool skip, free, success;

	spin_lock_irq(&client->lock);
	generation = client->device->generation;
	todo = r->todo;
	/* Allow 1000ms grace period for other reallocations. */
	if (todo == ISO_RES_ALLOC &&
	    time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
		if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3)))
			client_get(client);
		skip = true;
	} else {
		/* We could be called twice within the same generation. */
		skip = todo == ISO_RES_REALLOC &&
		       r->generation == generation;
	}
	free = todo == ISO_RES_DEALLOC ||
	       todo == ISO_RES_ALLOC_ONCE ||
	       todo == ISO_RES_DEALLOC_ONCE;
	r->generation = generation;
	spin_unlock_irq(&client->lock);

	if (skip)
		goto out;

	bandwidth = r->bandwidth;

	fw_iso_resource_manage(client->device->card, generation,
			r->channels, &channel, &bandwidth,
			todo == ISO_RES_ALLOC ||
			todo == ISO_RES_REALLOC ||
			todo == ISO_RES_ALLOC_ONCE);
	/*
	 * Is this generation outdated already?  As long as this resource sticks
	 * in the idr, it will be scheduled again for a newer generation or at
	 * shutdown.
	 */
	if (channel == -EAGAIN &&
	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
		goto out;

	success = channel >= 0 || bandwidth > 0;

	spin_lock_irq(&client->lock);
	/*
	 * Transit from allocation to reallocation, except if the client
	 * requested deallocation in the meantime.
	 */
	if (r->todo == ISO_RES_ALLOC)
		r->todo = ISO_RES_REALLOC;
	/*
	 * Allocation or reallocation failure?  Pull this resource out of the
	 * idr and prepare for deletion, unless the client is shutting down.
	 */
	if (r->todo == ISO_RES_REALLOC && !success &&
	    !client->in_shutdown &&
	    idr_find(&client->resource_idr, r->resource.handle)) {
		idr_remove(&client->resource_idr, r->resource.handle);
		client_put(client);
		free = true;
	}
	spin_unlock_irq(&client->lock);

	if (todo == ISO_RES_ALLOC && channel >= 0)
		r->channels = 1ULL << channel;

	if (todo == ISO_RES_REALLOC && success)
		goto out;

	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
		e = r->e_alloc;
		r->e_alloc = NULL;
	} else {
		e = r->e_dealloc;
		r->e_dealloc = NULL;
	}
	e->resource.handle	= r->resource.handle;
	e->resource.channel	= channel;
	e->resource.bandwidth	= bandwidth;

	queue_event(client, &e->event,
		    &e->resource, sizeof(e->resource), NULL, 0);

	if (free) {
		cancel_delayed_work(&r->work);
		kfree(r->e_alloc);
		kfree(r->e_dealloc);
		kfree(r);
	}
 out:
	client_put(client);
}

static void schedule_iso_resource(struct iso_resource *r)
{
	client_get(r->client);
	if (!schedule_delayed_work(&r->work, 0))
		client_put(r->client);
}

static void release_iso_resource(struct client *client,
				 struct client_resource *resource)
{
	struct iso_resource *r =
		container_of(resource, struct iso_resource, resource);

	spin_lock_irq(&client->lock);
	r->todo = ISO_RES_DEALLOC;
	schedule_iso_resource(r);
	spin_unlock_irq(&client->lock);
}

static int init_iso_resource(struct client *client,
		struct fw_cdev_allocate_iso_resource *request, int todo)
{
	struct iso_resource_event *e1, *e2;
	struct iso_resource *r;
	int ret;

	if ((request->channels == 0 && request->bandwidth == 0) ||
	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
	    request->bandwidth < 0)
		return -EINVAL;

	r  = kmalloc(sizeof(*r), GFP_KERNEL);
	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
	if (r == NULL || e1 == NULL || e2 == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_DELAYED_WORK(&r->work, iso_resource_work);
	r->client	= client;
	r->todo		= todo;
	r->generation	= -1;
	r->channels	= request->channels;
	r->bandwidth	= request->bandwidth;
	r->e_alloc	= e1;
	r->e_dealloc	= e2;

	e1->resource.closure	= request->closure;
	e1->resource.type	= FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
	e2->resource.closure	= request->closure;
	e2->resource.type	= FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

	if (todo == ISO_RES_ALLOC) {
		r->resource.release = release_iso_resource;
		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
		if (ret < 0)
			goto fail;
	} else {
		r->resource.release = NULL;
		r->resource.handle = -1;
		schedule_iso_resource(r);
	}
	request->handle = r->resource.handle;

	return 0;
 fail:
	kfree(r);
	kfree(e1);
	kfree(e2);

	return ret;
}

static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
{
	struct fw_cdev_allocate_iso_resource *request = buffer;

	return init_iso_resource(client, request, ISO_RES_ALLOC);
}

static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
{
	struct fw_cdev_deallocate *request = buffer;

	return release_client_resource(client, request->handle,
				       release_iso_resource, NULL);
}

static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer)
{
	struct fw_cdev_allocate_iso_resource *request = buffer;

	return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
}

static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer)
{
	struct fw_cdev_allocate_iso_resource *request = buffer;

	return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
}

/*
 * Returns a speed code:  Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, void *buffer)
{
	return client->device->max_speed;
}

static int ioctl_send_broadcast_request(struct client *client, void *buffer)
{
	struct fw_cdev_send_request *request = buffer;

	switch (request->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		break;
	default:
		return -EINVAL;
	}

	/* Security policy: Only allow accesses to Units Space. */
	if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
		return -EACCES;

	return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100);
}

1246 1247
static int ioctl_send_stream_packet(struct client *client, void *buffer)
{
1248 1249 1250
	struct fw_cdev_send_stream_packet *p = buffer;
	struct fw_cdev_send_request request;
	int dest;
1251

1252 1253 1254
	if (p->speed > client->device->card->link_speed ||
	    p->length > 1024 << p->speed)
		return -EIO;
1255

1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266
	if (p->tag > 3 || p->channel > 63 || p->sy > 15)
		return -EINVAL;

	dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy);
	request.tcode		= TCODE_STREAM_DATA;
	request.length		= p->length;
	request.closure		= p->closure;
	request.data		= p->data;
	request.generation	= p->generation;

	return init_request(client, &request, dest, p->speed);
1267 1268
}

static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
	ioctl_get_info,
	ioctl_send_request,
	ioctl_allocate,
	ioctl_deallocate,
	ioctl_send_response,
	ioctl_initiate_bus_reset,
	ioctl_add_descriptor,
	ioctl_remove_descriptor,
	ioctl_create_iso_context,
	ioctl_queue_iso,
	ioctl_start_iso,
	ioctl_stop_iso,
	ioctl_get_cycle_timer,
	ioctl_allocate_iso_resource,
	ioctl_deallocate_iso_resource,
	ioctl_allocate_iso_resource_once,
	ioctl_deallocate_iso_resource_once,
	ioctl_get_speed,
	ioctl_send_broadcast_request,
	ioctl_send_stream_packet,
};

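/*
 * All ioctls use type '#' and are dispatched by _IOC_NR into
 * ioctl_handlers[] above.  Argument structs are bounced through a
 * 256-byte stack buffer: copied in for _IOC_WRITE, copied back out for
 * _IOC_READ.
 */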
static int dispatch_ioctl(struct client *client,
			  unsigned int cmd, void __user *arg)
{
	char buffer[256];
	int ret;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
		return -EINVAL;

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	return ret;
}

static long fw_device_op_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	return dispatch_ioctl(client, cmd, (void __user *) arg);
}

#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
				      unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
				 page_count, direction);
	if (ret < 0)
		return ret;

	ret = fw_iso_buffer_map(&client->buffer, vma);
	if (ret < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return ret;
}

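/*
 * idr_for_each() callback used at release time: force-release every
 * remaining resource and drop the reference that its idr entry held.
 */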
static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *r = p;
	struct client *client = data;

	r->release(client, r);
	client_put(client);

	return 0;
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *e, *next_e;

	mutex_lock(&client->device->client_list_mutex);
	list_del(&client->link);
	mutex_unlock(&client->device->client_list_mutex);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irq(&client->lock);
	client->in_shutdown = true;
	spin_unlock_irq(&client->lock);

	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_remove_all(&client->resource_idr);
	idr_destroy(&client->resource_idr);

	list_for_each_entry_safe(e, next_e, &client->event_list, link)
		kfree(e);

	client_put(client);

	return 0;
}

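/* POLLHUP | POLLERR once the device is gone, POLLIN while events are queued. */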
static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
{
	struct client *client = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= POLLHUP | POLLERR;
	if (!list_empty(&client->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.poll		= fw_device_op_poll,
	.release	= fw_device_op_release,
	.mmap		= fw_device_op_mmap,

#ifdef CONFIG_COMPAT
	.compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};