fw-cdev.c 25.9 KB
Newer Older
1 2
/*
 * Char device for device raw access
3
 *
4
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/poll.h>
28 29
#include <linux/preempt.h>
#include <linux/time.h>
30 31
#include <linux/delay.h>
#include <linux/mm.h>
32
#include <linux/idr.h>
33
#include <linux/compat.h>
34
#include <linux/firewire-cdev.h>
35
#include <asm/system.h>
36 37 38 39 40
#include <asm/uaccess.h>
#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"

41 42 43 44 45 46 47
struct client;

/*
 * A resource owned by a client (pending transaction, address handler,
 * config-ROM descriptor, ...).  Linked on client->resource_list and
 * identified to user space by @handle; @release undoes the allocation.
 */
struct client_resource {
	struct list_head link;
	void (*release)(struct client *client, struct client_resource *r);
	u32 handle;
};

48 49 50 51 52
/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in the struct.
 */

53 54 55 56 57
/*
 * An event queued for delivery to user space via read().  Up to two
 * data chunks (header + payload) are copied out in order.  Must be the
 * first member of every containing event type, since dequeue_event()
 * kfree()'s the event through this pointer.
 */
struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

58 59 60 61 62
/* Bus reset notification delivered to user space. */
struct bus_reset {
	struct event event;	/* must stay first, see struct event */
	struct fw_cdev_event_bus_reset reset;
};

63 64 65 66
struct response {
	struct event event;
	struct fw_transaction transaction;
	struct client *client;
67
	struct client_resource resource;
68 69 70 71 72 73 74 75 76
	struct fw_cdev_event_response response;
};

/* Isochronous interrupt event delivered to user space. */
struct iso_interrupt {
	struct event event;	/* must stay first, see struct event */
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct client {
77
	u32 version;
78 79
	struct fw_device *device;
	spinlock_t lock;
80
	u32 resource_handle;
81
	struct list_head resource_list;
82 83
	struct list_head event_list;
	wait_queue_head_t wait;
84
	u64 bus_reset_closure;
85

86
	struct fw_iso_context *iso_context;
87
	u64 iso_closure;
88 89
	struct fw_iso_buffer buffer;
	unsigned long vm_start;
90 91

	struct list_head link;
92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109
};

/* Convert a 64-bit value from the ioctl ABI into a user-space pointer. */
static inline void __user *
u64_to_uptr(__u64 value)
{
	return (void __user *)(unsigned long)value;
}

/* Convert a user-space pointer into the 64-bit ioctl ABI representation. */
static inline __u64
uptr_to_u64(void __user *ptr)
{
	return (__u64)(unsigned long)ptr;
}

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;
110
	unsigned long flags;
111

112
	device = fw_device_get_by_devt(inode->i_rdev);
113 114
	if (device == NULL)
		return -ENODEV;
115

116 117 118 119 120
	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

121
	client = kzalloc(sizeof(*client), GFP_KERNEL);
122 123
	if (client == NULL) {
		fw_device_put(device);
124
		return -ENOMEM;
125
	}
126

127
	client->device = device;
128
	INIT_LIST_HEAD(&client->event_list);
129
	INIT_LIST_HEAD(&client->resource_list);
130 131 132 133 134
	spin_lock_init(&client->lock);
	init_waitqueue_head(&client->wait);

	file->private_data = client;

135 136 137 138
	spin_lock_irqsave(&device->card->lock, flags);
	list_add_tail(&client->link, &device->client_list);
	spin_unlock_irqrestore(&device->card->lock, flags);

139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154
	return 0;
}

/*
 * Append an event to the client's event list and wake any reader.
 * @data0/@size0 and @data1/@size1 are the two chunks that read() will
 * copy to user space; either may be NULL/0.
 */
static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}

159 160
/*
 * Wait for an event (or device shutdown), then copy the oldest queued
 * event out to the user buffer and free it.  Returns the number of
 * bytes copied, or a negative error code.
 */
static int
dequeue_event(struct client *client, char __user *buffer, size_t count)
{
	struct event *event;
	unsigned long flags;
	size_t copied, chunk;
	int i, retval;

	retval = wait_event_interruptible(client->wait,
					  !list_empty(&client->event_list) ||
					  fw_device_is_shutdown(client->device));
	if (retval < 0)
		return retval;

	/* Woken by shutdown rather than by a queued event. */
	if (list_empty(&client->event_list) &&
		       fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irqsave(&client->lock, flags);
	event = container_of(client->event_list.next, struct event, link);
	list_del(&event->link);
	spin_unlock_irqrestore(&client->lock, flags);

	/* Copy out both chunks, truncating to the caller's buffer size. */
	copied = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && copied < count; i++) {
		chunk = min(event->v[i].size, count - copied);
		if (copy_to_user(buffer + copied, event->v[i].data, chunk)) {
			retval = -EFAULT;
			goto out;
		}
		copied += chunk;
	}
	retval = copied;

 out:
	kfree(event);

	return retval;
}

/* read() handler: deliver the oldest queued event to user space. */
static ssize_t
fw_device_op_read(struct file *file,
		  char __user *buffer, size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

208
/* caller must hold card->lock so that node pointers can be dereferenced here */
209 210
static void
fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
211
		     struct client *client)
212
{
213
	struct fw_card *card = client->device->card;
214

215
	event->closure	     = client->bus_reset_closure;
216
	event->type          = FW_CDEV_EVENT_BUS_RESET;
217
	event->generation    = client->device->generation;
218
	event->node_id       = client->device->node_id;
219 220 221 222 223 224
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
	event->irm_node_id   = card->irm_node->node_id;
	event->root_node_id  = card->root_node->node_id;
}

225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240
/*
 * Invoke @callback on every client of @device, with card->lock held
 * (so callbacks must not sleep and may use GFP_ATOMIC only).
 */
static void
for_each_client(struct fw_device *device,
		void (*callback)(struct client *client))
{
	struct fw_card *card = device->card;
	struct client *c;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	list_for_each_entry(c, &device->client_list, link)
		callback(c);

	spin_unlock_irqrestore(&card->lock, flags);
}

241 242 243 244 245
static void
queue_bus_reset_event(struct client *client)
{
	struct bus_reset *bus_reset;

246
	bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC);
247 248 249 250 251
	if (bus_reset == NULL) {
		fw_notify("Out of memory when allocating bus reset event\n");
		return;
	}

252
	fill_bus_reset_event(&bus_reset->reset, client);
253 254

	queue_event(client, &bus_reset->event,
255
		    &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
256 257 258 259
}

void fw_device_cdev_update(struct fw_device *device)
{
260 261
	for_each_client(device, queue_bus_reset_event);
}
262

263 264 265 266
/* Wake a client's readers/pollers, e.g. so they can notice shutdown. */
static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}
267

268 269 270
/*
 * Called when @device goes away: wake all of its clients so blocked
 * read()s return -ENODEV and poll()ers see POLLHUP.
 */
void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}

273
static int ioctl_get_info(struct client *client, void *buffer)
274
{
275
	struct fw_cdev_get_info *get_info = buffer;
276
	struct fw_cdev_event_bus_reset bus_reset;
277
	struct fw_card *card = client->device->card;
278
	unsigned long ret = 0;
279

280 281
	client->version = get_info->version;
	get_info->version = FW_CDEV_VERSION;
282

283 284
	down_read(&fw_device_rwsem);

285 286 287
	if (get_info->rom != 0) {
		void __user *uptr = u64_to_uptr(get_info->rom);
		size_t want = get_info->rom_length;
288
		size_t have = client->device->config_rom_length * 4;
289

290 291
		ret = copy_to_user(uptr, client->device->config_rom,
				   min(want, have));
292
	}
293
	get_info->rom_length = client->device->config_rom_length * 4;
294

295 296 297 298 299
	up_read(&fw_device_rwsem);

	if (ret != 0)
		return -EFAULT;

300 301 302
	client->bus_reset_closure = get_info->bus_reset_closure;
	if (get_info->bus_reset != 0) {
		void __user *uptr = u64_to_uptr(get_info->bus_reset);
303
		unsigned long flags;
304

305
		spin_lock_irqsave(&card->lock, flags);
306
		fill_bus_reset_event(&bus_reset, client);
307 308
		spin_unlock_irqrestore(&card->lock, flags);

309
		if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
310 311
			return -EFAULT;
	}
312

313
	get_info->card = card->index;
314 315 316 317

	return 0;
}

318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364
/*
 * Register @resource with @client and assign it the next handle.
 * The handle is returned to user space to name the resource later.
 */
static void
add_client_resource(struct client *client, struct client_resource *resource)
{
	unsigned long flags;

	spin_lock_irqsave(&client->lock, flags);
	list_add_tail(&resource->link, &client->resource_list);
	resource->handle = client->resource_handle++;
	spin_unlock_irqrestore(&client->lock, flags);
}

/*
 * Look up and unlink the resource with @handle.  If @resource is
 * non-NULL the caller takes ownership of the unlinked resource;
 * otherwise its release callback is invoked here.  Returns -EINVAL
 * if no resource with that handle exists.
 */
static int
release_client_resource(struct client *client, u32 handle,
			struct client_resource **resource)
{
	struct client_resource *r;
	unsigned long flags;

	spin_lock_irqsave(&client->lock, flags);
	list_for_each_entry(r, &client->resource_list, link) {
		if (r->handle == handle) {
			list_del(&r->link);
			break;
		}
	}
	spin_unlock_irqrestore(&client->lock, flags);

	/* If the loop ran to completion, r points back at the list head. */
	if (&r->link == &client->resource_list)
		return -EINVAL;

	if (resource)
		*resource = r;
	else
		r->release(client, r);

	return 0;
}

/*
 * Release callback for an outbound transaction resource: cancel the
 * in-flight transaction (its completion will deliver RCODE_CANCELLED).
 */
static void
release_transaction(struct client *client, struct client_resource *resource)
{
	struct response *response =
		container_of(resource, struct response, resource);

	fw_cancel_transaction(client->device->card, &response->transaction);
}

365 366 367 368 369 370
/*
 * Transaction completion callback: unlink the transaction resource,
 * capture rcode and payload, and queue the response event to the
 * client.  Runs in tasklet context.
 */
static void
complete_transaction(struct fw_card *card, int rcode,
		     void *payload, size_t length, void *data)
{
	struct response *response = data;
	struct client *client = response->client;
	struct fw_cdev_event_response *r = &response->response;
	unsigned long flags;

	/* Never copy more than fits in the preallocated data buffer. */
	if (length < r->length)
		r->length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(r->data, payload, r->length);

	spin_lock_irqsave(&client->lock, flags);
	list_del(&response->resource.link);
	spin_unlock_irqrestore(&client->lock, flags);

	r->type   = FW_CDEV_EVENT_RESPONSE;
	r->rcode  = rcode;

	/*
	 * In the case that sizeof(*r) doesn't align with the position of the
	 * data, and the read is short, preserve an extra copy of the data
	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
	 * for short reads and some apps depended on it, this is both safe
	 * and prudent for compatibility.
	 */
	if (r->length <= sizeof(*r) - offsetof(typeof(*r), data))
		queue_event(client, &response->event, r, sizeof(*r),
			    r->data, r->length);
	else
		queue_event(client, &response->event, r, sizeof(*r) + r->length,
			    NULL, 0);
}

J
Jeff Garzik 已提交
401
static int ioctl_send_request(struct client *client, void *buffer)
402 403
{
	struct fw_device *device = client->device;
404
	struct fw_cdev_send_request *request = buffer;
405 406 407
	struct response *response;

	/* What is the biggest size we'll accept, really? */
408
	if (request->length > 4096)
409 410
		return -EINVAL;

411
	response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
412 413 414 415
	if (response == NULL)
		return -ENOMEM;

	response->client = client;
416 417
	response->response.length = request->length;
	response->response.closure = request->closure;
418

419
	if (request->data &&
420
	    copy_from_user(response->response.data,
421
			   u64_to_uptr(request->data), request->length)) {
422 423 424 425
		kfree(response);
		return -EFAULT;
	}

426 427
	response->resource.release = release_transaction;
	add_client_resource(client, &response->resource);
428

429
	fw_send_request(device->card, &response->transaction,
430
			request->tcode & 0x1f,
431
			device->node->node_id,
432
			request->generation,
433
			device->max_speed,
434 435
			request->offset,
			response->response.data, request->length,
436 437
			complete_transaction, response);

438
	if (request->data)
439
		return sizeof(request) + request->length;
440
	else
441
		return sizeof(request);
442 443 444 445 446 447
}

struct address_handler {
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
448
	struct client_resource resource;
449 450 451 452 453 454
};

struct request {
	struct fw_request *request;
	void *data;
	size_t length;
455
	struct client_resource resource;
456 457 458 459 460 461 462
};

/* Inbound-request event delivered to user space. */
struct request_event {
	struct event event;	/* must stay first, see struct event */
	struct fw_cdev_event_request request;
};

463 464 465 466 467 468 469 470 471 472 473
/*
 * Release callback for an inbound request the client never answered:
 * respond with RCODE_CONFLICT_ERROR and free the bookkeeping struct.
 */
static void
release_request(struct client *client, struct client_resource *resource)
{
	struct request *request =
		container_of(resource, struct request, resource);

	fw_send_response(client->device->card, request->request,
			 RCODE_CONFLICT_ERROR);
	kfree(request);
}

474 475 476 477 478 479 480 481 482 483 484 485
/*
 * Address handler callback for inbound requests: record the request as
 * a client resource and queue a request event (header + payload) to
 * user space.  Runs in atomic context; on allocation failure the
 * request is answered with RCODE_CONFLICT_ERROR.
 */
static void
handle_request(struct fw_card *card, struct fw_request *r,
	       int tcode, int destination, int source,
	       int generation, int speed,
	       unsigned long long offset,
	       void *payload, size_t length, void *callback_data)
{
	struct address_handler *handler = callback_data;
	struct client *client = handler->client;
	struct request *request;
	struct request_event *e;

	request = kmalloc(sizeof(*request), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (request == NULL || e == NULL) {
		kfree(request);
		kfree(e);
		fw_send_response(card, r, RCODE_CONFLICT_ERROR);
		return;
	}

	request->request = r;
	request->data    = payload;
	request->length  = length;

	request->resource.release = release_request;
	add_client_resource(client, &request->resource);

	e->request.type    = FW_CDEV_EVENT_REQUEST;
	e->request.tcode   = tcode;
	e->request.offset  = offset;
	e->request.length  = length;
	e->request.handle  = request->resource.handle;
	e->request.closure = handler->closure;

	queue_event(client, &e->event,
		    &e->request, sizeof(e->request), payload, length);
}

513 514 515 516 517 518 519 520 521 522 523
/*
 * Release callback for an allocated address range: deregister the
 * handler from the core and free it.
 */
static void
release_address_handler(struct client *client,
			struct client_resource *resource)
{
	struct address_handler *handler =
		container_of(resource, struct address_handler, resource);

	fw_core_remove_address_handler(&handler->handler);
	kfree(handler);
}

524
static int ioctl_allocate(struct client *client, void *buffer)
525
{
526
	struct fw_cdev_allocate *request = buffer;
527 528 529
	struct address_handler *handler;
	struct fw_address_region region;

530
	handler = kmalloc(sizeof(*handler), GFP_KERNEL);
531 532 533
	if (handler == NULL)
		return -ENOMEM;

534 535 536
	region.start = request->offset;
	region.end = request->offset + request->length;
	handler->handler.length = request->length;
537 538
	handler->handler.address_callback = handle_request;
	handler->handler.callback_data = handler;
539
	handler->closure = request->closure;
540 541 542 543 544 545 546
	handler->client = client;

	if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
		kfree(handler);
		return -EBUSY;
	}

547 548
	handler->resource.release = release_address_handler;
	add_client_resource(client, &handler->resource);
549
	request->handle = handler->resource.handle;
550 551 552 553

	return 0;
}

554
static int ioctl_deallocate(struct client *client, void *buffer)
555
{
556
	struct fw_cdev_deallocate *request = buffer;
557

558
	return release_client_resource(client, request->handle, NULL);
559 560
}

561
static int ioctl_send_response(struct client *client, void *buffer)
562
{
563
	struct fw_cdev_send_response *request = buffer;
564
	struct client_resource *resource;
565 566
	struct request *r;

567
	if (release_client_resource(client, request->handle, &resource) < 0)
568
		return -EINVAL;
569
	r = container_of(resource, struct request, resource);
570 571 572
	if (request->length < r->length)
		r->length = request->length;
	if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
573 574
		return -EFAULT;

575
	fw_send_response(client->device->card, r->request, request->rcode);
576 577 578 579 580
	kfree(r);

	return 0;
}

581
static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
582
{
583
	struct fw_cdev_initiate_bus_reset *request = buffer;
584 585
	int short_reset;

586
	short_reset = (request->type == FW_CDEV_SHORT_RESET);
587 588 589 590

	return fw_core_initiate_bus_reset(client->device->card, short_reset);
}

591 592
struct descriptor {
	struct fw_descriptor d;
593
	struct client_resource resource;
594 595 596
	u32 data[0];
};

597 598 599 600 601 602 603 604 605 606
/*
 * Release callback for a config ROM descriptor: remove it from the
 * local node's config ROM and free it.
 */
static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor *descriptor =
		container_of(resource, struct descriptor, resource);

	fw_core_remove_descriptor(&descriptor->d);
	kfree(descriptor);
}

607
static int ioctl_add_descriptor(struct client *client, void *buffer)
608
{
609
	struct fw_cdev_add_descriptor *request = buffer;
610 611 612
	struct descriptor *descriptor;
	int retval;

613
	if (request->length > 256)
614 615 616
		return -EINVAL;

	descriptor =
617
		kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
618 619 620 621
	if (descriptor == NULL)
		return -ENOMEM;

	if (copy_from_user(descriptor->data,
622
			   u64_to_uptr(request->data), request->length * 4)) {
623 624 625 626
		kfree(descriptor);
		return -EFAULT;
	}

627 628 629
	descriptor->d.length = request->length;
	descriptor->d.immediate = request->immediate;
	descriptor->d.key = request->key;
630 631 632 633 634 635 636 637
	descriptor->d.data = descriptor->data;

	retval = fw_core_add_descriptor(&descriptor->d);
	if (retval < 0) {
		kfree(descriptor);
		return retval;
	}

638 639
	descriptor->resource.release = release_descriptor;
	add_client_resource(client, &descriptor->resource);
640
	request->handle = descriptor->resource.handle;
641 642 643 644

	return 0;
}

645
static int ioctl_remove_descriptor(struct client *client, void *buffer)
646
{
647
	struct fw_cdev_remove_descriptor *request = buffer;
648

649
	return release_client_resource(client, request->handle, NULL);
650 651
}

652
static void
653 654
iso_callback(struct fw_iso_context *context, u32 cycle,
	     size_t header_length, void *header, void *data)
655 656
{
	struct client *client = data;
657
	struct iso_interrupt *irq;
658

659 660
	irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC);
	if (irq == NULL)
661 662
		return;

663 664 665 666 667 668 669
	irq->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
	irq->interrupt.closure   = client->iso_closure;
	irq->interrupt.cycle     = cycle;
	irq->interrupt.header_length = header_length;
	memcpy(irq->interrupt.header, header, header_length);
	queue_event(client, &irq->event, &irq->interrupt,
		    sizeof(irq->interrupt) + header_length, NULL, 0);
670 671
}

672
static int ioctl_create_iso_context(struct client *client, void *buffer)
673
{
674
	struct fw_cdev_create_iso_context *request = buffer;
675
	struct fw_iso_context *context;
676

677 678 679 680
	/* We only support one context at this time. */
	if (client->iso_context != NULL)
		return -EBUSY;

681
	if (request->channel > 63)
682 683
		return -EINVAL;

684
	switch (request->type) {
685
	case FW_ISO_CONTEXT_RECEIVE:
686
		if (request->header_size < 4 || (request->header_size & 3))
687
			return -EINVAL;
688

689 690 691
		break;

	case FW_ISO_CONTEXT_TRANSMIT:
692
		if (request->speed > SCODE_3200)
693 694 695 696 697
			return -EINVAL;

		break;

	default:
698
		return -EINVAL;
699 700
	}

701 702 703 704 705 706 707 708 709
	context =  fw_iso_context_create(client->device->card,
					 request->type,
					 request->channel,
					 request->speed,
					 request->header_size,
					 iso_callback, client);
	if (IS_ERR(context))
		return PTR_ERR(context);

710
	client->iso_closure = request->closure;
711
	client->iso_context = context;
712

713 714 715
	/* We only support one context at this time. */
	request->handle = 0;

716 717 718
	return 0;
}

719 720 721 722 723 724 725 726
/*
 * Macros for decoding the iso packet control header.
 * Field widths: payload_length 16 bits, interrupt 1, skip 1, tag 2,
 * sy 4, header_length 8.  Bug fix: tag and sy were masked with 0x02
 * and 0x04, which dropped bits of those fields; the masks must be
 * 0x03 and 0x0f to cover the full 2- and 4-bit widths.
 */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)

727
static int ioctl_queue_iso(struct client *client, void *buffer)
728
{
729
	struct fw_cdev_queue_iso *request = buffer;
730
	struct fw_cdev_iso_packet __user *p, *end, *next;
731
	struct fw_iso_context *ctx = client->iso_context;
732
	unsigned long payload, buffer_end, header_length;
733
	u32 control;
734 735 736 737 738 739
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

740
	if (ctx == NULL || request->handle != 0)
741 742
		return -EINVAL;

743 744
	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
745 746
	 * the iso buffer, and the pointer points inside the buffer,
	 * we setup the payload pointers accordingly.  Otherwise we
747
	 * set them both to 0, which will still let packets with
748 749
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
750 751
	 * and the request->data pointer is ignored.
	 */
752

753
	payload = (unsigned long)request->data - client->vm_start;
754
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
755
	if (request->data == 0 || client->buffer.pages == NULL ||
756
	    payload >= buffer_end) {
757
		payload = 0;
758
		buffer_end = 0;
759 760
	}

A
Al Viro 已提交
761 762 763
	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);

	if (!access_ok(VERIFY_READ, p, request->size))
764 765
		return -EFAULT;

766
	end = (void __user *)p + request->size;
767 768
	count = 0;
	while (p < end) {
769
		if (get_user(control, &p->control))
770
			return -EFAULT;
771 772 773 774 775 776
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);
777

778
		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
779 780
			header_length = u.packet.header_length;
		} else {
781 782 783 784
			/*
			 * We require that header_length is a multiple of
			 * the fixed header size, ctx->header_size.
			 */
785 786 787 788
			if (ctx->header_size == 0) {
				if (u.packet.header_length > 0)
					return -EINVAL;
			} else if (u.packet.header_length % ctx->header_size != 0) {
789
				return -EINVAL;
790
			}
791 792 793
			header_length = 0;
		}

794
		next = (struct fw_cdev_iso_packet __user *)
795
			&p->header[header_length / 4];
796 797 798
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
799
		    (u.packet.header, p->header, header_length))
800
			return -EFAULT;
801
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
802 803
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
804
		if (payload + u.packet.payload_length > buffer_end)
805 806
			return -EINVAL;

807 808
		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
809 810 811 812 813 814 815
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}

816 817 818
	request->size    -= uptr_to_u64(p) - request->packets;
	request->packets  = uptr_to_u64(p);
	request->data     = client->vm_start + payload;
819 820 821 822

	return count;
}

823
static int ioctl_start_iso(struct client *client, void *buffer)
824
{
825
	struct fw_cdev_start_iso *request = buffer;
826

827
	if (client->iso_context == NULL || request->handle != 0)
828
		return -EINVAL;
829

830
	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
831
		if (request->tags == 0 || request->tags > 15)
832 833
			return -EINVAL;

834
		if (request->sync > 15)
835 836 837
			return -EINVAL;
	}

838 839
	return fw_iso_context_start(client->iso_context, request->cycle,
				    request->sync, request->tags);
840 841
}

842
static int ioctl_stop_iso(struct client *client, void *buffer)
843
{
844 845
	struct fw_cdev_stop_iso *request = buffer;

846
	if (client->iso_context == NULL || request->handle != 0)
847 848
		return -EINVAL;

849 850 851
	return fw_iso_context_stop(client->iso_context);
}

852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873
/*
 * FW_CDEV_IOC_GET_CYCLE_TIMER: sample the card's cycle timer and the
 * system clock as close together as possible (interrupts and
 * preemption disabled to minimize skew between the two reads).
 */
static int ioctl_get_cycle_timer(struct client *client, void *buffer)
{
	struct fw_cdev_get_cycle_timer *request = buffer;
	struct fw_card *card = client->device->card;
	unsigned long long bus_time;
	struct timeval tv;
	unsigned long flags;

	preempt_disable();
	local_irq_save(flags);

	bus_time = card->driver->get_bus_time(card);
	do_gettimeofday(&tv);

	local_irq_restore(flags);
	preempt_enable();

	/* local_time in microseconds; cycle_timer is the low 32 bits. */
	request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
	request->cycle_timer = bus_time & 0xffffffff;
	return 0;
}

874 875 876 877 878 879 880 881 882 883 884 885 886
static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
	ioctl_get_info,
	ioctl_send_request,
	ioctl_allocate,
	ioctl_deallocate,
	ioctl_send_response,
	ioctl_initiate_bus_reset,
	ioctl_add_descriptor,
	ioctl_remove_descriptor,
	ioctl_create_iso_context,
	ioctl_queue_iso,
	ioctl_start_iso,
	ioctl_stop_iso,
887
	ioctl_get_cycle_timer,
888 889
};

890 891 892
/*
 * Common ioctl dispatch: validate the command, copy the argument
 * struct into a kernel buffer, run the handler, and copy the result
 * back out according to the command's direction bits.
 */
static int
dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
{
	char buffer[256];
	int ret;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
		return -EINVAL;

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	return 0;
}

static long
fw_device_op_ioctl(struct file *file,
		   unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

925 926 927
	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

928 929 930 931 932 933 934 935 936 937
	return dispatch_ioctl(client, cmd, (void __user *) arg);
}

#ifdef CONFIG_COMPAT
/* 32-bit-compat ioctl(): same as above, with compat pointer decoding. */
static long
fw_device_op_compat_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
948 949 950 951
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, retval;

952 953 954
	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

955 956 957 958 959 960
	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
961

962
	if (vma->vm_start & ~PAGE_MASK)
963 964 965
		return -EINVAL;

	client->vm_start = vma->vm_start;
966 967 968 969 970 971 972 973 974 975 976 977 978 979
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	retval = fw_iso_buffer_init(&client->buffer, client->device->card,
				    page_count, direction);
	if (retval < 0)
		return retval;
980

981 982 983 984 985
	retval = fw_iso_buffer_map(&client->buffer, vma);
	if (retval < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return retval;
986 987 988 989 990
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
991
	struct event *e, *next_e;
992
	struct client_resource *r, *next_r;
993
	unsigned long flags;
994

995 996 997
	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

998 999 1000
	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

1001 1002
	list_for_each_entry_safe(r, next_r, &client->resource_list, link)
		r->release(client, r);
1003

1004 1005 1006 1007
	/*
	 * FIXME: We should wait for the async tasklets to stop
	 * running before freeing the memory.
	 */
1008

1009 1010
	list_for_each_entry_safe(e, next_e, &client->event_list, link)
		kfree(e);
1011

1012 1013 1014 1015
	spin_lock_irqsave(&client->device->card->lock, flags);
	list_del(&client->link);
	spin_unlock_irqrestore(&client->device->card->lock, flags);

1016 1017 1018 1019 1020 1021 1022 1023 1024
	fw_device_put(client->device);
	kfree(client);

	return 0;
}

/*
 * poll() handler: readable when an event is queued; HUP/ERR once the
 * device has been shut down.
 */
static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
{
	struct client *client = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= POLLHUP | POLLERR;
	if (!list_empty(&client->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

1037
const struct file_operations fw_device_ops = {
1038 1039 1040 1041 1042 1043 1044 1045 1046
	.owner		= THIS_MODULE,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.poll		= fw_device_op_poll,
	.release	= fw_device_op_release,
	.mmap		= fw_device_op_mmap,

#ifdef CONFIG_COMPAT
1047
	.compat_ioctl	= fw_device_op_compat_ioctl,
1048 1049
#endif
};