/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/poll.h>
#include <linux/preempt.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/idr.h>
#include <linux/compat.h>
#include <linux/firewire-cdev.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"

struct client;

/*
 * A kernel object (address handler, pending transaction, descriptor, ...)
 * that has been handed out to userspace as a numeric handle.  release()
 * is invoked when the handle is deallocated or the client goes away.
 */
struct client_resource {
	struct list_head link;
	void (*release)(struct client *client, struct client_resource *r);
	u32 handle;
};

48 49 50 51 52
/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in the struct.
 */

53 54 55 56 57
struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

58 59 60 61 62
struct bus_reset {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

63 64 65 66
struct response {
	struct event event;
	struct fw_transaction transaction;
	struct client *client;
67
	struct client_resource resource;
68 69 70 71 72 73 74 75 76
	struct fw_cdev_event_response response;
};

/* Event payload for FW_CDEV_EVENT_ISO_INTERRUPT; iso headers follow in memory. */
struct iso_interrupt {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct client {
77
	u32 version;
78 79
	struct fw_device *device;
	spinlock_t lock;
80
	u32 resource_handle;
81
	struct list_head resource_list;
82 83
	struct list_head event_list;
	wait_queue_head_t wait;
84
	u64 bus_reset_closure;
85

86
	struct fw_iso_context *iso_context;
87
	u64 iso_closure;
88 89
	struct fw_iso_buffer buffer;
	unsigned long vm_start;
90 91

	struct list_head link;
92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109
};

/* Convert a u64 received from userspace into a userspace pointer. */
static inline void __user *u64_to_uptr(__u64 value)
{
	return (void __user *)(unsigned long)value;
}

/* Convert a userspace pointer into a u64 suitable for the cdev ABI. */
static inline __u64 uptr_to_u64(void __user *ptr)
{
	return (__u64)(unsigned long)ptr;
}

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;
110
	unsigned long flags;
111

112
	device = fw_device_get_by_devt(inode->i_rdev);
113 114
	if (device == NULL)
		return -ENODEV;
115

116 117 118 119 120
	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

121
	client = kzalloc(sizeof(*client), GFP_KERNEL);
122 123
	if (client == NULL) {
		fw_device_put(device);
124
		return -ENOMEM;
125
	}
126

127
	client->device = device;
128
	INIT_LIST_HEAD(&client->event_list);
129
	INIT_LIST_HEAD(&client->resource_list);
130 131 132 133 134
	spin_lock_init(&client->lock);
	init_waitqueue_head(&client->wait);

	file->private_data = client;

135 136 137 138
	spin_lock_irqsave(&device->card->lock, flags);
	list_add_tail(&client->link, &device->client_list);
	spin_unlock_irqrestore(&device->card->lock, flags);

139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154
	return 0;
}

/*
 * Append an event to the client's event list and wake any reader.
 * Callable from any context (takes client->lock with IRQs disabled).
 */
static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}

159 160
static int
dequeue_event(struct client *client, char __user *buffer, size_t count)
161 162 163 164
{
	unsigned long flags;
	struct event *event;
	size_t size, total;
165
	int i, retval;
166

167 168 169 170 171
	retval = wait_event_interruptible(client->wait,
					  !list_empty(&client->event_list) ||
					  fw_device_is_shutdown(client->device));
	if (retval < 0)
		return retval;
172

173 174 175
	if (list_empty(&client->event_list) &&
		       fw_device_is_shutdown(client->device))
		return -ENODEV;
176

177
	spin_lock_irqsave(&client->lock, flags);
178 179 180 181 182 183 184
	event = container_of(client->event_list.next, struct event, link);
	list_del(&event->link);
	spin_unlock_irqrestore(&client->lock, flags);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
185 186
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			retval = -EFAULT;
187
			goto out;
188
		}
189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207
		total += size;
	}
	retval = total;

 out:
	kfree(event);

	return retval;
}

/* read(2) handler: hand the next queued event to userspace. */
static ssize_t
fw_device_op_read(struct file *file,
		  char __user *buffer, size_t count, loff_t *offset)
{
	return dequeue_event(file->private_data, buffer, count);
}

208 209
static void
fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
210
		     struct client *client)
211
{
212
	struct fw_card *card = client->device->card;
213

214
	event->closure	     = client->bus_reset_closure;
215
	event->type          = FW_CDEV_EVENT_BUS_RESET;
216
	event->generation    = client->device->generation;
217
	smp_rmb();           /* node_id must not be older than generation */
218
	event->node_id       = client->device->node_id;
219 220 221 222 223 224
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
	event->irm_node_id   = card->irm_node->node_id;
	event->root_node_id  = card->root_node->node_id;
}

225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240
static void
for_each_client(struct fw_device *device,
		void (*callback)(struct client *client))
{
	struct fw_card *card = device->card;
	struct client *c;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	list_for_each_entry(c, &device->client_list, link)
		callback(c);

	spin_unlock_irqrestore(&card->lock, flags);
}

241 242 243 244 245
static void
queue_bus_reset_event(struct client *client)
{
	struct bus_reset *bus_reset;

246
	bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC);
247 248 249 250 251
	if (bus_reset == NULL) {
		fw_notify("Out of memory when allocating bus reset event\n");
		return;
	}

252
	fill_bus_reset_event(&bus_reset->reset, client);
253 254

	queue_event(client, &bus_reset->event,
255
		    &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
256 257 258 259
}

void fw_device_cdev_update(struct fw_device *device)
{
260 261
	for_each_client(device, queue_bus_reset_event);
}
262

263 264 265 266
static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}
267

268 269 270
void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
271 272
}

273
static int ioctl_get_info(struct client *client, void *buffer)
274
{
275
	struct fw_cdev_get_info *get_info = buffer;
276
	struct fw_cdev_event_bus_reset bus_reset;
277
	unsigned long ret = 0;
278

279 280
	client->version = get_info->version;
	get_info->version = FW_CDEV_VERSION;
281

282 283
	down_read(&fw_device_rwsem);

284 285 286
	if (get_info->rom != 0) {
		void __user *uptr = u64_to_uptr(get_info->rom);
		size_t want = get_info->rom_length;
287
		size_t have = client->device->config_rom_length * 4;
288

289 290
		ret = copy_to_user(uptr, client->device->config_rom,
				   min(want, have));
291
	}
292
	get_info->rom_length = client->device->config_rom_length * 4;
293

294 295 296 297 298
	up_read(&fw_device_rwsem);

	if (ret != 0)
		return -EFAULT;

299 300 301
	client->bus_reset_closure = get_info->bus_reset_closure;
	if (get_info->bus_reset != 0) {
		void __user *uptr = u64_to_uptr(get_info->bus_reset);
302

303
		fill_bus_reset_event(&bus_reset, client);
304
		if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
305 306
			return -EFAULT;
	}
307

308
	get_info->card = client->device->card->index;
309 310 311 312

	return 0;
}

313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359
static void
add_client_resource(struct client *client, struct client_resource *resource)
{
	unsigned long flags;

	spin_lock_irqsave(&client->lock, flags);
	list_add_tail(&resource->link, &client->resource_list);
	resource->handle = client->resource_handle++;
	spin_unlock_irqrestore(&client->lock, flags);
}

/*
 * Detach the resource carrying @handle from @client.  When @resource is
 * non-NULL, ownership passes to the caller; otherwise the resource's
 * release callback runs immediately.  Returns -EINVAL if no such handle.
 */
static int
release_client_resource(struct client *client, u32 handle,
			struct client_resource **resource)
{
	struct client_resource *r, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&client->lock, flags);
	list_for_each_entry(r, &client->resource_list, link) {
		if (r->handle != handle)
			continue;
		found = r;
		list_del(&r->link);
		break;
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (found == NULL)
		return -EINVAL;

	if (resource)
		*resource = found;
	else
		found->release(client, found);

	return 0;
}

/* Resource release callback for in-flight transactions: cancel them. */
static void
release_transaction(struct client *client, struct client_resource *resource)
{
	struct response *r = container_of(resource, struct response, resource);

	fw_cancel_transaction(client->device->card, &r->transaction);
}

360 361 362 363 364 365
static void
complete_transaction(struct fw_card *card, int rcode,
		     void *payload, size_t length, void *data)
{
	struct response *response = data;
	struct client *client = response->client;
366
	unsigned long flags;
367 368 369 370 371 372 373

	if (length < response->response.length)
		response->response.length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(response->response.data, payload,
		       response->response.length);

374
	spin_lock_irqsave(&client->lock, flags);
375
	list_del(&response->resource.link);
376 377
	spin_unlock_irqrestore(&client->lock, flags);

378 379 380
	response->response.type   = FW_CDEV_EVENT_RESPONSE;
	response->response.rcode  = rcode;
	queue_event(client, &response->event,
381
		    &response->response, sizeof(response->response),
382 383 384
		    response->response.data, response->response.length);
}

J
Jeff Garzik 已提交
385
static int ioctl_send_request(struct client *client, void *buffer)
386 387
{
	struct fw_device *device = client->device;
388
	struct fw_cdev_send_request *request = buffer;
389 390 391
	struct response *response;

	/* What is the biggest size we'll accept, really? */
392
	if (request->length > 4096)
393 394
		return -EINVAL;

395
	response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
396 397 398 399
	if (response == NULL)
		return -ENOMEM;

	response->client = client;
400 401
	response->response.length = request->length;
	response->response.closure = request->closure;
402

403
	if (request->data &&
404
	    copy_from_user(response->response.data,
405
			   u64_to_uptr(request->data), request->length)) {
406 407 408 409
		kfree(response);
		return -EFAULT;
	}

410 411
	response->resource.release = release_transaction;
	add_client_resource(client, &response->resource);
412

413
	fw_send_request(device->card, &response->transaction,
414
			request->tcode & 0x1f,
415
			device->node->node_id,
416
			request->generation,
417
			device->max_speed,
418 419
			request->offset,
			response->response.data, request->length,
420 421
			complete_transaction, response);

422
	if (request->data)
423
		return sizeof(request) + request->length;
424
	else
425
		return sizeof(request);
426 427 428 429 430 431
}

struct address_handler {
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
432
	struct client_resource resource;
433 434 435 436 437 438
};

struct request {
	struct fw_request *request;
	void *data;
	size_t length;
439
	struct client_resource resource;
440 441 442 443 444 445 446
};

struct request_event {
	struct event event;
	struct fw_cdev_event_request request;
};

447 448 449 450 451 452 453 454 455 456 457
static void
release_request(struct client *client, struct client_resource *resource)
{
	struct request *request =
		container_of(resource, struct request, resource);

	fw_send_response(client->device->card, request->request,
			 RCODE_CONFLICT_ERROR);
	kfree(request);
}

458 459 460 461 462 463 464 465 466 467 468 469
static void
handle_request(struct fw_card *card, struct fw_request *r,
	       int tcode, int destination, int source,
	       int generation, int speed,
	       unsigned long long offset,
	       void *payload, size_t length, void *callback_data)
{
	struct address_handler *handler = callback_data;
	struct request *request;
	struct request_event *e;
	struct client *client = handler->client;

470 471
	request = kmalloc(sizeof(*request), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
472 473 474 475 476 477 478 479 480 481 482
	if (request == NULL || e == NULL) {
		kfree(request);
		kfree(e);
		fw_send_response(card, r, RCODE_CONFLICT_ERROR);
		return;
	}

	request->request = r;
	request->data    = payload;
	request->length  = length;

483 484
	request->resource.release = release_request;
	add_client_resource(client, &request->resource);
485 486 487 488 489

	e->request.type    = FW_CDEV_EVENT_REQUEST;
	e->request.tcode   = tcode;
	e->request.offset  = offset;
	e->request.length  = length;
490
	e->request.handle  = request->resource.handle;
491 492 493
	e->request.closure = handler->closure;

	queue_event(client, &e->event,
494
		    &e->request, sizeof(e->request), payload, length);
495 496
}

497 498 499 500 501 502 503 504 505 506 507
static void
release_address_handler(struct client *client,
			struct client_resource *resource)
{
	struct address_handler *handler =
		container_of(resource, struct address_handler, resource);

	fw_core_remove_address_handler(&handler->handler);
	kfree(handler);
}

508
static int ioctl_allocate(struct client *client, void *buffer)
509
{
510
	struct fw_cdev_allocate *request = buffer;
511 512 513
	struct address_handler *handler;
	struct fw_address_region region;

514
	handler = kmalloc(sizeof(*handler), GFP_KERNEL);
515 516 517
	if (handler == NULL)
		return -ENOMEM;

518 519 520
	region.start = request->offset;
	region.end = request->offset + request->length;
	handler->handler.length = request->length;
521 522
	handler->handler.address_callback = handle_request;
	handler->handler.callback_data = handler;
523
	handler->closure = request->closure;
524 525 526 527 528 529 530
	handler->client = client;

	if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
		kfree(handler);
		return -EBUSY;
	}

531 532
	handler->resource.release = release_address_handler;
	add_client_resource(client, &handler->resource);
533
	request->handle = handler->resource.handle;
534 535 536 537

	return 0;
}

538
static int ioctl_deallocate(struct client *client, void *buffer)
539
{
540
	struct fw_cdev_deallocate *request = buffer;
541

542
	return release_client_resource(client, request->handle, NULL);
543 544
}

545
static int ioctl_send_response(struct client *client, void *buffer)
546
{
547
	struct fw_cdev_send_response *request = buffer;
548
	struct client_resource *resource;
549 550
	struct request *r;

551
	if (release_client_resource(client, request->handle, &resource) < 0)
552
		return -EINVAL;
553
	r = container_of(resource, struct request, resource);
554 555 556
	if (request->length < r->length)
		r->length = request->length;
	if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
557 558
		return -EFAULT;

559
	fw_send_response(client->device->card, r->request, request->rcode);
560 561 562 563 564
	kfree(r);

	return 0;
}

565
static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
566
{
567
	struct fw_cdev_initiate_bus_reset *request = buffer;
568 569
	int short_reset;

570
	short_reset = (request->type == FW_CDEV_SHORT_RESET);
571 572 573 574

	return fw_core_initiate_bus_reset(client->device->card, short_reset);
}

575 576
struct descriptor {
	struct fw_descriptor d;
577
	struct client_resource resource;
578 579 580
	u32 data[0];
};

581 582 583 584 585 586 587 588 589 590
static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor *descriptor =
		container_of(resource, struct descriptor, resource);

	fw_core_remove_descriptor(&descriptor->d);
	kfree(descriptor);
}

591
static int ioctl_add_descriptor(struct client *client, void *buffer)
592
{
593
	struct fw_cdev_add_descriptor *request = buffer;
594 595 596
	struct descriptor *descriptor;
	int retval;

597
	if (request->length > 256)
598 599 600
		return -EINVAL;

	descriptor =
601
		kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
602 603 604 605
	if (descriptor == NULL)
		return -ENOMEM;

	if (copy_from_user(descriptor->data,
606
			   u64_to_uptr(request->data), request->length * 4)) {
607 608 609 610
		kfree(descriptor);
		return -EFAULT;
	}

611 612 613
	descriptor->d.length = request->length;
	descriptor->d.immediate = request->immediate;
	descriptor->d.key = request->key;
614 615 616 617 618 619 620 621
	descriptor->d.data = descriptor->data;

	retval = fw_core_add_descriptor(&descriptor->d);
	if (retval < 0) {
		kfree(descriptor);
		return retval;
	}

622 623
	descriptor->resource.release = release_descriptor;
	add_client_resource(client, &descriptor->resource);
624
	request->handle = descriptor->resource.handle;
625 626 627 628

	return 0;
}

629
static int ioctl_remove_descriptor(struct client *client, void *buffer)
630
{
631
	struct fw_cdev_remove_descriptor *request = buffer;
632

633
	return release_client_resource(client, request->handle, NULL);
634 635
}

636
static void
637 638
iso_callback(struct fw_iso_context *context, u32 cycle,
	     size_t header_length, void *header, void *data)
639 640
{
	struct client *client = data;
641
	struct iso_interrupt *irq;
642

643 644
	irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC);
	if (irq == NULL)
645 646
		return;

647 648 649 650 651 652 653
	irq->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
	irq->interrupt.closure   = client->iso_closure;
	irq->interrupt.cycle     = cycle;
	irq->interrupt.header_length = header_length;
	memcpy(irq->interrupt.header, header, header_length);
	queue_event(client, &irq->event, &irq->interrupt,
		    sizeof(irq->interrupt) + header_length, NULL, 0);
654 655
}

656
static int ioctl_create_iso_context(struct client *client, void *buffer)
657
{
658
	struct fw_cdev_create_iso_context *request = buffer;
659
	struct fw_iso_context *context;
660

661 662 663 664
	/* We only support one context at this time. */
	if (client->iso_context != NULL)
		return -EBUSY;

665
	if (request->channel > 63)
666 667
		return -EINVAL;

668
	switch (request->type) {
669
	case FW_ISO_CONTEXT_RECEIVE:
670
		if (request->header_size < 4 || (request->header_size & 3))
671
			return -EINVAL;
672

673 674 675
		break;

	case FW_ISO_CONTEXT_TRANSMIT:
676
		if (request->speed > SCODE_3200)
677 678 679 680 681
			return -EINVAL;

		break;

	default:
682
		return -EINVAL;
683 684
	}

685 686 687 688 689 690 691 692 693
	context =  fw_iso_context_create(client->device->card,
					 request->type,
					 request->channel,
					 request->speed,
					 request->header_size,
					 iso_callback, client);
	if (IS_ERR(context))
		return PTR_ERR(context);

694
	client->iso_closure = request->closure;
695
	client->iso_context = context;
696

697 698 699
	/* We only support one context at this time. */
	request->handle = 0;

700 701 702
	return 0;
}

/*
 * Macros for decoding the iso packet control header.  Field widths follow
 * struct fw_cdev_iso_packet: 16-bit payload length, 1-bit interrupt and
 * skip flags, 2-bit tag, 4-bit sy, 8-bit header length.
 */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)	/* fix: was 0x02, dropped tag bit 0 */
#define GET_SY(v)		(((v) >> 20) & 0x0f)	/* fix: was 0x04, dropped sy bits 0,1,3 */
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)

711
static int ioctl_queue_iso(struct client *client, void *buffer)
712
{
713
	struct fw_cdev_queue_iso *request = buffer;
714
	struct fw_cdev_iso_packet __user *p, *end, *next;
715
	struct fw_iso_context *ctx = client->iso_context;
716
	unsigned long payload, buffer_end, header_length;
717
	u32 control;
718 719 720 721 722 723
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

724
	if (ctx == NULL || request->handle != 0)
725 726
		return -EINVAL;

727 728
	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
729 730
	 * the iso buffer, and the pointer points inside the buffer,
	 * we setup the payload pointers accordingly.  Otherwise we
731
	 * set them both to 0, which will still let packets with
732 733
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
734 735
	 * and the request->data pointer is ignored.
	 */
736

737
	payload = (unsigned long)request->data - client->vm_start;
738
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
739
	if (request->data == 0 || client->buffer.pages == NULL ||
740
	    payload >= buffer_end) {
741
		payload = 0;
742
		buffer_end = 0;
743 744
	}

A
Al Viro 已提交
745 746 747
	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);

	if (!access_ok(VERIFY_READ, p, request->size))
748 749
		return -EFAULT;

750
	end = (void __user *)p + request->size;
751 752
	count = 0;
	while (p < end) {
753
		if (get_user(control, &p->control))
754
			return -EFAULT;
755 756 757 758 759 760
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);
761

762
		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
763 764
			header_length = u.packet.header_length;
		} else {
765 766 767 768
			/*
			 * We require that header_length is a multiple of
			 * the fixed header size, ctx->header_size.
			 */
769 770 771 772
			if (ctx->header_size == 0) {
				if (u.packet.header_length > 0)
					return -EINVAL;
			} else if (u.packet.header_length % ctx->header_size != 0) {
773
				return -EINVAL;
774
			}
775 776 777
			header_length = 0;
		}

778
		next = (struct fw_cdev_iso_packet __user *)
779
			&p->header[header_length / 4];
780 781 782
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
783
		    (u.packet.header, p->header, header_length))
784
			return -EFAULT;
785
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
786 787
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
788
		if (payload + u.packet.payload_length > buffer_end)
789 790
			return -EINVAL;

791 792
		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
793 794 795 796 797 798 799
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}

800 801 802
	request->size    -= uptr_to_u64(p) - request->packets;
	request->packets  = uptr_to_u64(p);
	request->data     = client->vm_start + payload;
803 804 805 806

	return count;
}

807
static int ioctl_start_iso(struct client *client, void *buffer)
808
{
809
	struct fw_cdev_start_iso *request = buffer;
810

811
	if (client->iso_context == NULL || request->handle != 0)
812
		return -EINVAL;
813

814
	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
815
		if (request->tags == 0 || request->tags > 15)
816 817
			return -EINVAL;

818
		if (request->sync > 15)
819 820 821
			return -EINVAL;
	}

822 823
	return fw_iso_context_start(client->iso_context, request->cycle,
				    request->sync, request->tags);
824 825
}

826
static int ioctl_stop_iso(struct client *client, void *buffer)
827
{
828 829
	struct fw_cdev_stop_iso *request = buffer;

830
	if (client->iso_context == NULL || request->handle != 0)
831 832
		return -EINVAL;

833 834 835
	return fw_iso_context_stop(client->iso_context);
}

836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857
static int ioctl_get_cycle_timer(struct client *client, void *buffer)
{
	struct fw_cdev_get_cycle_timer *request = buffer;
	struct fw_card *card = client->device->card;
	unsigned long long bus_time;
	struct timeval tv;
	unsigned long flags;

	preempt_disable();
	local_irq_save(flags);

	bus_time = card->driver->get_bus_time(card);
	do_gettimeofday(&tv);

	local_irq_restore(flags);
	preempt_enable();

	request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
	request->cycle_timer = bus_time & 0xffffffff;
	return 0;
}

858 859 860 861 862 863 864 865 866 867 868 869 870
static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
	ioctl_get_info,
	ioctl_send_request,
	ioctl_allocate,
	ioctl_deallocate,
	ioctl_send_response,
	ioctl_initiate_bus_reset,
	ioctl_add_descriptor,
	ioctl_remove_descriptor,
	ioctl_create_iso_context,
	ioctl_queue_iso,
	ioctl_start_iso,
	ioctl_stop_iso,
871
	ioctl_get_cycle_timer,
872 873
};

874 875 876
static int
dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
{
877 878 879 880 881
	char buffer[256];
	int retval;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
882
		return -EINVAL;
883 884

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
885
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
886 887 888 889 890 891 892 893 894
		    copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
	if (retval < 0)
		return retval;

	if (_IOC_DIR(cmd) & _IOC_READ) {
895
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
896 897
		    copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
			return -EFAULT;
898
	}
899 900

	return 0;
901 902 903 904 905 906 907 908
}

static long
fw_device_op_ioctl(struct file *file,
		   unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

909 910 911
	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

912 913 914 915 916 917 918 919 920 921
	return dispatch_ioctl(client, cmd, (void __user *) arg);
}

#ifdef CONFIG_COMPAT
/* compat_ioctl entry point for 32-bit userspace on 64-bit kernels. */
static long
fw_device_op_compat_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
932 933 934 935
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, retval;

936 937 938
	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

939 940 941 942 943 944
	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
945

946
	if (vma->vm_start & ~PAGE_MASK)
947 948 949
		return -EINVAL;

	client->vm_start = vma->vm_start;
950 951 952 953 954 955 956 957 958 959 960 961 962 963
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	retval = fw_iso_buffer_init(&client->buffer, client->device->card,
				    page_count, direction);
	if (retval < 0)
		return retval;
964

965 966 967 968 969
	retval = fw_iso_buffer_map(&client->buffer, vma);
	if (retval < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return retval;
970 971 972 973 974
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
975
	struct event *e, *next_e;
976
	struct client_resource *r, *next_r;
977
	unsigned long flags;
978

979 980 981
	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

982 983 984
	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

985 986
	list_for_each_entry_safe(r, next_r, &client->resource_list, link)
		r->release(client, r);
987

988 989 990 991
	/*
	 * FIXME: We should wait for the async tasklets to stop
	 * running before freeing the memory.
	 */
992

993 994
	list_for_each_entry_safe(e, next_e, &client->event_list, link)
		kfree(e);
995

996 997 998 999
	spin_lock_irqsave(&client->device->card->lock, flags);
	list_del(&client->link);
	spin_unlock_irqrestore(&client->device->card->lock, flags);

1000 1001 1002 1003 1004 1005 1006 1007 1008
	fw_device_put(client->device);
	kfree(client);

	return 0;
}

/* poll(2) handler: readable when events are queued; HUP once shut down. */
static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
{
	struct client *client = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= POLLHUP | POLLERR;
	if (!list_empty(&client->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

1021
const struct file_operations fw_device_ops = {
1022 1023 1024 1025 1026 1027 1028 1029 1030
	.owner		= THIS_MODULE,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.poll		= fw_device_op_poll,
	.release	= fw_device_op_release,
	.mmap		= fw_device_op_mmap,

#ifdef CONFIG_COMPAT
1031
	.compat_ioctl	= fw_device_op_compat_ioctl,
1032 1033
#endif
};