/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/poll.h>
#include <linux/preempt.h>
#include <linux/time.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/idr.h>
#include <linux/compat.h>
#include <linux/firewire-cdev.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"

struct client;
struct client_resource {
	struct list_head link;
	void (*release)(struct client *client, struct client_resource *r);
	u32 handle;
};

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in the struct.
 */

struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct bus_reset {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct response {
	struct event event;
	struct fw_transaction transaction;
	struct client *client;
	struct client_resource resource;
	struct fw_cdev_event_response response;
};

struct iso_interrupt {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct client {
	u32 version;
	struct fw_device *device;
	spinlock_t lock;
	u32 resource_handle;
	struct list_head resource_list;
	struct list_head event_list;
	wait_queue_head_t wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;

	struct list_head link;
};

static inline void __user *
u64_to_uptr(__u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline __u64
uptr_to_u64(void __user *ptr)
{
	return (__u64)(unsigned long)ptr;
}

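/*
 * open() of /dev/fw* looks up the fw_device by the character device number,
 * allocates a per-file-descriptor struct client and links it into the
 * device's client list.
 */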
static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;
	unsigned long flags;

	device = fw_device_get_by_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);
		return -ENOMEM;
	}

	client->device = device;
	INIT_LIST_HEAD(&client->event_list);
	INIT_LIST_HEAD(&client->resource_list);
	spin_lock_init(&client->lock);
	init_waitqueue_head(&client->wait);

	file->private_data = client;

	spin_lock_irqsave(&device->client_list_lock, flags);
	list_add_tail(&client->link, &device->client_list);
	spin_unlock_irqrestore(&device->client_list_lock, flags);

	return 0;
}

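/*
 * Append an event to the client's event list and wake up any reader blocked
 * in dequeue_event().  The two (data, size) pairs let callers hand over a
 * fixed-size header plus a variable-length payload as a single event.
 */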
static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}

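/*
 * Block until an event is available or the device goes away, then copy at
 * most @count bytes of it to the user buffer and free the event.
 */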
static int
dequeue_event(struct client *client, char __user *buffer, size_t count)
{
	unsigned long flags;
	struct event *event;
	size_t size, total;
	int i, retval;

	retval = wait_event_interruptible(client->wait,
					  !list_empty(&client->event_list) ||
					  fw_device_is_shutdown(client->device));
	if (retval < 0)
		return retval;

	if (list_empty(&client->event_list) &&
		       fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irqsave(&client->lock, flags);
	event = container_of(client->event_list.next, struct event, link);
	list_del(&event->link);
	spin_unlock_irqrestore(&client->lock, flags);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			retval = -EFAULT;
			goto out;
		}
		total += size;
	}
	retval = total;

 out:
	kfree(event);

	return retval;
}

static ssize_t
fw_device_op_read(struct file *file,
		  char __user *buffer, size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

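/* Snapshot the current bus topology for @client under card->lock. */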
static void
fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
		     struct client *client)
{
	struct fw_card *card = client->device->card;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	event->closure	     = client->bus_reset_closure;
	event->type          = FW_CDEV_EVENT_BUS_RESET;
	event->generation    = client->device->generation;
	event->node_id       = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
	event->irm_node_id   = card->irm_node->node_id;
	event->root_node_id  = card->root_node->node_id;

	spin_unlock_irqrestore(&card->lock, flags);
}

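/* Run @callback for every client of @device under client_list_lock. */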
static void
for_each_client(struct fw_device *device,
		void (*callback)(struct client *client))
{
	struct client *c;
	unsigned long flags;

	spin_lock_irqsave(&device->client_list_lock, flags);

	list_for_each_entry(c, &device->client_list, link)
		callback(c);

	spin_unlock_irqrestore(&device->client_list_lock, flags);
}

static void
queue_bus_reset_event(struct client *client)
{
	struct bus_reset *bus_reset;

	bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC);
	if (bus_reset == NULL) {
		fw_notify("Out of memory when allocating bus reset event\n");
		return;
	}

	fill_bus_reset_event(&bus_reset->reset, client);

	queue_event(client, &bus_reset->event,
		    &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
}

void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}

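/*
 * FW_CDEV_IOC_GET_INFO is typically the first ioctl a client issues: it
 * exchanges ABI versions, optionally copies out the config ROM and an
 * initial bus reset event, and records the bus_reset_closure.  A minimal
 * userspace sketch (illustrative only, error handling omitted):
 *
 *	struct fw_cdev_get_info info = { .version = FW_CDEV_VERSION };
 *	int fd = open("/dev/fw0", O_RDWR);
 *	ioctl(fd, FW_CDEV_IOC_GET_INFO, &info);
 *
 * Subsequent events are then collected with poll() and read() on fd.
 */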
static int ioctl_get_info(struct client *client, void *buffer)
{
	struct fw_cdev_get_info *get_info = buffer;
	struct fw_cdev_event_bus_reset bus_reset;
	unsigned long ret = 0;

	client->version = get_info->version;
	get_info->version = FW_CDEV_VERSION;
	get_info->card = client->device->card->index;

	down_read(&fw_device_rwsem);

	if (get_info->rom != 0) {
		void __user *uptr = u64_to_uptr(get_info->rom);
		size_t want = get_info->rom_length;
		size_t have = client->device->config_rom_length * 4;

		ret = copy_to_user(uptr, client->device->config_rom,
				   min(want, have));
	}
	get_info->rom_length = client->device->config_rom_length * 4;

	up_read(&fw_device_rwsem);

	if (ret != 0)
		return -EFAULT;

	client->bus_reset_closure = get_info->bus_reset_closure;
	if (get_info->bus_reset != 0) {
		void __user *uptr = u64_to_uptr(get_info->bus_reset);

		fill_bus_reset_event(&bus_reset, client);
		if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
			return -EFAULT;
	}

	return 0;
}

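/*
 * Client resources are outstanding transactions, allocated address ranges,
 * pending incoming requests and added descriptors.  Each gets a handle from
 * a per-client counter so userspace can refer back to it, and a release
 * callback that undoes its effect when it is deallocated or the client
 * goes away.
 */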
static void
add_client_resource(struct client *client, struct client_resource *resource)
{
	unsigned long flags;

	spin_lock_irqsave(&client->lock, flags);
	list_add_tail(&resource->link, &client->resource_list);
	resource->handle = client->resource_handle++;
	spin_unlock_irqrestore(&client->lock, flags);
}

static int
release_client_resource(struct client *client, u32 handle,
			struct client_resource **resource)
{
	struct client_resource *r;
	unsigned long flags;

	spin_lock_irqsave(&client->lock, flags);
	list_for_each_entry(r, &client->resource_list, link) {
		if (r->handle == handle) {
			list_del(&r->link);
			break;
		}
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (&r->link == &client->resource_list)
		return -EINVAL;

	if (resource)
		*resource = r;
	else
		r->release(client, r);

	return 0;
}

static void
release_transaction(struct client *client, struct client_resource *resource)
{
	struct response *response =
		container_of(resource, struct response, resource);

	fw_cancel_transaction(client->device->card, &response->transaction);
}

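/*
 * Completion callback for fw_send_request(): unlink the transaction from the
 * client's resource list and queue an FW_CDEV_EVENT_RESPONSE event carrying
 * the rcode and any response payload.
 */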
static void
complete_transaction(struct fw_card *card, int rcode,
		     void *payload, size_t length, void *data)
{
	struct response *response = data;
	struct client *client = response->client;
	unsigned long flags;
	struct fw_cdev_event_response *r = &response->response;

	if (length < r->length)
		r->length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(r->data, payload, r->length);

	spin_lock_irqsave(&client->lock, flags);
	list_del(&response->resource.link);
	spin_unlock_irqrestore(&client->lock, flags);

	r->type   = FW_CDEV_EVENT_RESPONSE;
	r->rcode  = rcode;

	/*
	 * In the case that sizeof(*r) doesn't align with the position of the
	 * data, and the read is short, preserve an extra copy of the data
	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
	 * for short reads and some apps depended on it, this is both safe
	 * and prudent for compatibility.
	 */
	if (r->length <= sizeof(*r) - offsetof(typeof(*r), data))
		queue_event(client, &response->event, r, sizeof(*r),
			    r->data, r->length);
	else
		queue_event(client, &response->event, r, sizeof(*r) + r->length,
			    NULL, 0);
}

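/*
 * Fire off an asynchronous request.  The result comes back later through
 * complete_transaction() as an FW_CDEV_EVENT_RESPONSE event; until then the
 * in-flight transaction is tracked as a client resource so it can be
 * cancelled if the file is closed.
 */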
static int ioctl_send_request(struct client *client, void *buffer)
{
	struct fw_device *device = client->device;
	struct fw_cdev_send_request *request = buffer;
	struct response *response;

	/* What is the biggest size we'll accept, really? */
	if (request->length > 4096)
		return -EINVAL;

	response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
	if (response == NULL)
		return -ENOMEM;

	response->client = client;
	response->response.length = request->length;
	response->response.closure = request->closure;

	if (request->data &&
	    copy_from_user(response->response.data,
			   u64_to_uptr(request->data), request->length)) {
		kfree(response);
		return -EFAULT;
	}

	response->resource.release = release_transaction;
	add_client_resource(client, &response->resource);

	fw_send_request(device->card, &response->transaction,
			request->tcode & 0x1f,
			device->node->node_id,
			request->generation,
			device->max_speed,
			request->offset,
			response->response.data, request->length,
			complete_transaction, response);

	if (request->data)
		return sizeof(request) + request->length;
	else
		return sizeof(request);
}

struct address_handler {
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
	struct client_resource resource;
};

struct request {
	struct fw_request *request;
	void *data;
	size_t length;
	struct client_resource resource;
};

struct request_event {
	struct event event;
	struct fw_cdev_event_request request;
};

static void
release_request(struct client *client, struct client_resource *resource)
{
	struct request *request =
		container_of(resource, struct request, resource);

	fw_send_response(client->device->card, request->request,
			 RCODE_CONFLICT_ERROR);
	kfree(request);
}

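/*
 * Handler for incoming requests to an address range allocated with
 * ioctl_allocate(): keep the fw_request pending as a client resource and
 * forward an FW_CDEV_EVENT_REQUEST event to userspace, which completes it
 * later via ioctl_send_response().
 */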
static void
handle_request(struct fw_card *card, struct fw_request *r,
	       int tcode, int destination, int source,
	       int generation, int speed,
	       unsigned long long offset,
	       void *payload, size_t length, void *callback_data)
{
	struct address_handler *handler = callback_data;
	struct request *request;
	struct request_event *e;
	struct client *client = handler->client;

	request = kmalloc(sizeof(*request), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (request == NULL || e == NULL) {
		kfree(request);
		kfree(e);
		fw_send_response(card, r, RCODE_CONFLICT_ERROR);
		return;
	}

	request->request = r;
	request->data    = payload;
	request->length  = length;

	request->resource.release = release_request;
	add_client_resource(client, &request->resource);

	e->request.type    = FW_CDEV_EVENT_REQUEST;
	e->request.tcode   = tcode;
	e->request.offset  = offset;
	e->request.length  = length;
	e->request.handle  = request->resource.handle;
	e->request.closure = handler->closure;

	queue_event(client, &e->event,
		    &e->request, sizeof(e->request), payload, length);
}

static void
release_address_handler(struct client *client,
			struct client_resource *resource)
{
	struct address_handler *handler =
		container_of(resource, struct address_handler, resource);

	fw_core_remove_address_handler(&handler->handler);
	kfree(handler);
}

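/*
 * Claim a range of the local node's address space and route incoming
 * requests to it through handle_request() above.
 */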
static int ioctl_allocate(struct client *client, void *buffer)
{
	struct fw_cdev_allocate *request = buffer;
	struct address_handler *handler;
	struct fw_address_region region;

	handler = kmalloc(sizeof(*handler), GFP_KERNEL);
	if (handler == NULL)
		return -ENOMEM;

	region.start = request->offset;
	region.end = request->offset + request->length;
	handler->handler.length = request->length;
	handler->handler.address_callback = handle_request;
	handler->handler.callback_data = handler;
	handler->closure = request->closure;
	handler->client = client;

	if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
		kfree(handler);
		return -EBUSY;
	}

	handler->resource.release = release_address_handler;
	add_client_resource(client, &handler->resource);
	request->handle = handler->resource.handle;

	return 0;
}

static int ioctl_deallocate(struct client *client, void *buffer)
{
	struct fw_cdev_deallocate *request = buffer;

	return release_client_resource(client, request->handle, NULL);
}

static int ioctl_send_response(struct client *client, void *buffer)
{
	struct fw_cdev_send_response *request = buffer;
	struct client_resource *resource;
	struct request *r;

	if (release_client_resource(client, request->handle, &resource) < 0)
		return -EINVAL;
	r = container_of(resource, struct request, resource);
	if (request->length < r->length)
		r->length = request->length;
	if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
		return -EFAULT;

	fw_send_response(client->device->card, r->request, request->rcode);
	kfree(r);

	return 0;
}

static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
{
	struct fw_cdev_initiate_bus_reset *request = buffer;
	int short_reset;

	short_reset = (request->type == FW_CDEV_SHORT_RESET);

	return fw_core_initiate_bus_reset(client->device->card, short_reset);
}

struct descriptor {
	struct fw_descriptor d;
	struct client_resource resource;
	u32 data[0];
};

static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor *descriptor =
		container_of(resource, struct descriptor, resource);

	fw_core_remove_descriptor(&descriptor->d);
	kfree(descriptor);
}

static int ioctl_add_descriptor(struct client *client, void *buffer)
{
	struct fw_cdev_add_descriptor *request = buffer;
	struct descriptor *descriptor;
	int retval;

	if (request->length > 256)
		return -EINVAL;

	descriptor =
		kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
	if (descriptor == NULL)
		return -ENOMEM;

	if (copy_from_user(descriptor->data,
			   u64_to_uptr(request->data), request->length * 4)) {
		kfree(descriptor);
		return -EFAULT;
	}

	descriptor->d.length = request->length;
	descriptor->d.immediate = request->immediate;
	descriptor->d.key = request->key;
	descriptor->d.data = descriptor->data;

	retval = fw_core_add_descriptor(&descriptor->d);
	if (retval < 0) {
		kfree(descriptor);
		return retval;
	}

	descriptor->resource.release = release_descriptor;
	add_client_resource(client, &descriptor->resource);
	request->handle = descriptor->resource.handle;

	return 0;
}

static int ioctl_remove_descriptor(struct client *client, void *buffer)
{
	struct fw_cdev_remove_descriptor *request = buffer;

	return release_client_resource(client, request->handle, NULL);
}

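/*
 * Interrupt callback of the client's isochronous context: bundles the cycle
 * number and the raw packet headers into an FW_CDEV_EVENT_ISO_INTERRUPT
 * event.
 */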
static void
iso_callback(struct fw_iso_context *context, u32 cycle,
	     size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt *irq;

	irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC);
	if (irq == NULL)
		return;

	irq->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
	irq->interrupt.closure   = client->iso_closure;
	irq->interrupt.cycle     = cycle;
	irq->interrupt.header_length = header_length;
	memcpy(irq->interrupt.header, header, header_length);
	queue_event(client, &irq->event, &irq->interrupt,
		    sizeof(irq->interrupt) + header_length, NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, void *buffer)
{
	struct fw_cdev_create_iso_context *request = buffer;
	struct fw_iso_context *context;

	/* We only support one context at this time. */
	if (client->iso_context != NULL)
		return -EBUSY;

	if (request->channel > 63)
		return -EINVAL;

	switch (request->type) {
	case FW_ISO_CONTEXT_RECEIVE:
		if (request->header_size < 4 || (request->header_size & 3))
			return -EINVAL;

		break;

	case FW_ISO_CONTEXT_TRANSMIT:
		if (request->speed > SCODE_3200)
			return -EINVAL;

		break;

	default:
		return -EINVAL;
	}

	context =  fw_iso_context_create(client->device->card,
					 request->type,
					 request->channel,
					 request->speed,
					 request->header_size,
					 iso_callback, client);
	if (IS_ERR(context))
		return PTR_ERR(context);

	client->iso_closure = request->closure;
	client->iso_context = context;

	/* We only support one context at this time. */
	request->handle = 0;

	return 0;
}

/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)

static int ioctl_queue_iso(struct client *client, void *buffer)
{
	struct fw_cdev_queue_iso *request = buffer;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, header_length;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL || request->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we set up the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the request->data pointer is ignored.
	 */

	payload = (unsigned long)request->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (request->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);

	if (!access_ok(VERIFY_READ, p, request->size))
		return -EFAULT;

	end = (void __user *)p + request->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
			header_length = u.packet.header_length;
		} else {
			/*
			 * We require that header_length is a multiple of
			 * the fixed header size, ctx->header_size.
			 */
			if (ctx->header_size == 0) {
				if (u.packet.header_length > 0)
					return -EINVAL;
			} else if (u.packet.header_length % ctx->header_size != 0) {
				return -EINVAL;
			}
			header_length = 0;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[header_length / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
		    (u.packet.header, p->header, header_length))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}

	request->size    -= uptr_to_u64(p) - request->packets;
	request->packets  = uptr_to_u64(p);
	request->data     = client->vm_start + payload;

	return count;
}

static int ioctl_start_iso(struct client *client, void *buffer)
{
	struct fw_cdev_start_iso *request = buffer;

	if (client->iso_context == NULL || request->handle != 0)
		return -EINVAL;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
		if (request->tags == 0 || request->tags > 15)
			return -EINVAL;

		if (request->sync > 15)
			return -EINVAL;
	}

	return fw_iso_context_start(client->iso_context, request->cycle,
				    request->sync, request->tags);
}

static int ioctl_stop_iso(struct client *client, void *buffer)
{
	struct fw_cdev_stop_iso *request = buffer;

	if (client->iso_context == NULL || request->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}

static int ioctl_get_cycle_timer(struct client *client, void *buffer)
{
	struct fw_cdev_get_cycle_timer *request = buffer;
	struct fw_card *card = client->device->card;
	unsigned long long bus_time;
	struct timeval tv;
	unsigned long flags;

	preempt_disable();
	local_irq_save(flags);

	bus_time = card->driver->get_bus_time(card);
	do_gettimeofday(&tv);

	local_irq_restore(flags);
	preempt_enable();

	request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
	request->cycle_timer = bus_time & 0xffffffff;
	return 0;
}

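/*
 * The position of each handler must match the _IOC_NR() of the corresponding
 * FW_CDEV_IOC_* number in linux/firewire-cdev.h, since dispatch_ioctl()
 * below uses that number directly as an index into this table.
 */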
static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
	ioctl_get_info,
	ioctl_send_request,
	ioctl_allocate,
	ioctl_deallocate,
	ioctl_send_response,
	ioctl_initiate_bus_reset,
	ioctl_add_descriptor,
	ioctl_remove_descriptor,
	ioctl_create_iso_context,
	ioctl_queue_iso,
	ioctl_start_iso,
	ioctl_stop_iso,
	ioctl_get_cycle_timer,
};

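/*
 * ioctl arguments are marshalled through a 256 byte stack buffer: the size
 * encoded in the command is checked against it, the struct is copied in for
 * _IOC_WRITE commands, the handler runs, and the (possibly updated) struct
 * is copied back out for _IOC_READ commands.
 */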
static int
dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
{
	char buffer[256];
	int retval;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
		return -EINVAL;

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
	if (retval < 0)
		return retval;

	if (_IOC_DIR(cmd) & _IOC_READ) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	return retval;
}

static long
fw_device_op_ioctl(struct file *file,
		   unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	return dispatch_ioctl(client, cmd, (void __user *) arg);
}

#ifdef CONFIG_COMPAT
static long
fw_device_op_compat_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif

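/*
 * mmap() of the device file backs the single isochronous payload buffer;
 * a writable mapping selects DMA to the device, a read-only one DMA from it.
 */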
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, retval;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	retval = fw_iso_buffer_init(&client->buffer, client->device->card,
				    page_count, direction);
	if (retval < 0)
		return retval;

	retval = fw_iso_buffer_map(&client->buffer, vma);
	if (retval < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return retval;
}

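/*
 * release() tears the client down: the mmap()'ed iso buffer, the iso
 * context, all remaining resources and queued events, and finally the
 * client's link into the device's client list.
 */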
static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *e, *next_e;
	struct client_resource *r, *next_r;
	unsigned long flags;

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	list_for_each_entry_safe(r, next_r, &client->resource_list, link)
		r->release(client, r);

	/*
	 * FIXME: We should wait for the async tasklets to stop
	 * running before freeing the memory.
	 */

	list_for_each_entry_safe(e, next_e, &client->event_list, link)
		kfree(e);

	spin_lock_irqsave(&client->device->client_list_lock, flags);
	list_del(&client->link);
	spin_unlock_irqrestore(&client->device->client_list_lock, flags);

	fw_device_put(client->device);
	kfree(client);

	return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
{
	struct client *client = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= POLLHUP | POLLERR;
	if (!list_empty(&client->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.poll		= fw_device_op_poll,
	.release	= fw_device_op_release,
	.mmap		= fw_device_op_mmap,

#ifdef CONFIG_COMPAT
	.compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};