/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
23
#include <linux/kref.h>
24 25 26 27
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
28
#include <linux/mutex.h>
29
#include <linux/poll.h>
30 31
#include <linux/preempt.h>
#include <linux/time.h>
J
Jay Fenlason 已提交
32
#include <linux/spinlock.h>
33 34
#include <linux/delay.h>
#include <linux/mm.h>
35
#include <linux/idr.h>
36
#include <linux/compat.h>
37
#include <linux/firewire-cdev.h>
38
#include <asm/system.h>
39 40 41 42 43 44
#include <asm/uaccess.h>
#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"

struct client {
45
	u32 version;
46
	struct fw_device *device;
47

48
	spinlock_t lock;
49 50
	bool in_shutdown;
	struct idr resource_idr;
51 52
	struct list_head event_list;
	wait_queue_head_t wait;
53
	u64 bus_reset_closure;
54

55
	struct fw_iso_context *iso_context;
56
	u64 iso_closure;
57 58
	struct fw_iso_buffer buffer;
	unsigned long vm_start;
59 60

	struct list_head link;
61
	struct kref kref;
62 63
};

64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81
static inline void client_get(struct client *client)
{
	kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
	struct client *client = container_of(kref, struct client, kref);

	fw_device_put(client->device);
	kfree(client);
}

static void client_put(struct client *client)
{
	kref_put(&client->kref, client_release);
}

82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145
struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);
struct client_resource {
	client_resource_release_fn_t release;
	int handle;
};

struct address_handler_resource {
	struct client_resource resource;
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
};

struct outbound_transaction_resource {
	struct client_resource resource;
	struct fw_transaction transaction;
};

struct inbound_transaction_resource {
	struct client_resource resource;
	struct fw_request *request;
	void *data;
	size_t length;
};

struct descriptor_resource {
	struct client_resource resource;
	struct fw_descriptor descriptor;
	u32 data[0];
};

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct bus_reset_event {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
	struct event event;
	struct client *client;
	struct outbound_transaction_resource r;
	struct fw_cdev_event_response response;
};

struct inbound_transaction_event {
	struct event event;
	struct fw_cdev_event_request request;
};

struct iso_interrupt_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

146
static inline void __user *u64_to_uptr(__u64 value)
147 148 149 150
{
	return (void __user *)(unsigned long)value;
}

151
static inline __u64 uptr_to_u64(void __user *ptr)
152 153 154 155 156 157 158 159 160
{
	return (__u64)(unsigned long)ptr;
}

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

161
	device = fw_device_get_by_devt(inode->i_rdev);
162 163
	if (device == NULL)
		return -ENODEV;
164

165 166 167 168 169
	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

170
	client = kzalloc(sizeof(*client), GFP_KERNEL);
171 172
	if (client == NULL) {
		fw_device_put(device);
173
		return -ENOMEM;
174
	}
175

176
	client->device = device;
177
	spin_lock_init(&client->lock);
178 179
	idr_init(&client->resource_idr);
	INIT_LIST_HEAD(&client->event_list);
180
	init_waitqueue_head(&client->wait);
181
	kref_init(&client->kref);
182 183 184

	file->private_data = client;

185
	mutex_lock(&device->client_list_mutex);
186
	list_add_tail(&client->link, &device->client_list);
187
	mutex_unlock(&device->client_list_mutex);
188

189 190 191 192 193 194 195 196 197 198 199 200 201 202
	return 0;
}

static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
203 204 205 206
	if (client->in_shutdown)
		kfree(event);
	else
		list_add_tail(&event->link, &client->event_list);
207
	spin_unlock_irqrestore(&client->lock, flags);
208 209

	wake_up_interruptible(&client->wait);
210 211
}

212 213
static int dequeue_event(struct client *client,
			 char __user *buffer, size_t count)
214 215 216 217
{
	unsigned long flags;
	struct event *event;
	size_t size, total;
218
	int i, ret;
219

220 221 222 223 224
	ret = wait_event_interruptible(client->wait,
			!list_empty(&client->event_list) ||
			fw_device_is_shutdown(client->device));
	if (ret < 0)
		return ret;
225

226 227 228
	if (list_empty(&client->event_list) &&
		       fw_device_is_shutdown(client->device))
		return -ENODEV;
229

230
	spin_lock_irqsave(&client->lock, flags);
231
	event = list_first_entry(&client->event_list, struct event, link);
232 233 234 235 236 237
	list_del(&event->link);
	spin_unlock_irqrestore(&client->lock, flags);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
238
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
239
			ret = -EFAULT;
240
			goto out;
241
		}
242 243
		total += size;
	}
244
	ret = total;
245 246 247 248

 out:
	kfree(event);

249
	return ret;
250 251
}

252 253
static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *offset)
254 255 256 257 258 259
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

260 261
static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
				 struct client *client)
262
{
263
	struct fw_card *card = client->device->card;
J
Jay Fenlason 已提交
264 265 266
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
267

268
	event->closure	     = client->bus_reset_closure;
269
	event->type          = FW_CDEV_EVENT_BUS_RESET;
270
	event->generation    = client->device->generation;
271
	event->node_id       = client->device->node_id;
272 273 274 275
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
	event->irm_node_id   = card->irm_node->node_id;
	event->root_node_id  = card->root_node->node_id;
J
Jay Fenlason 已提交
276 277

	spin_unlock_irqrestore(&card->lock, flags);
278 279
}

280 281
static void for_each_client(struct fw_device *device,
			    void (*callback)(struct client *client))
282 283 284
{
	struct client *c;

285
	mutex_lock(&device->client_list_mutex);
286 287
	list_for_each_entry(c, &device->client_list, link)
		callback(c);
288
	mutex_unlock(&device->client_list_mutex);
289 290
}

291
static void queue_bus_reset_event(struct client *client)
292
{
293
	struct bus_reset_event *e;
294

295 296
	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL) {
297 298 299 300
		fw_notify("Out of memory when allocating bus reset event\n");
		return;
	}

301
	fill_bus_reset_event(&e->reset, client);
302

303 304
	queue_event(client, &e->event,
		    &e->reset, sizeof(e->reset), NULL, 0);
305 306 307 308
}

void fw_device_cdev_update(struct fw_device *device)
{
309 310
	for_each_client(device, queue_bus_reset_event);
}
311

312 313 314 315
static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}
316

317 318 319
void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
320 321
}

322
static int ioctl_get_info(struct client *client, void *buffer)
323
{
324
	struct fw_cdev_get_info *get_info = buffer;
325
	struct fw_cdev_event_bus_reset bus_reset;
326
	unsigned long ret = 0;
327

328 329
	client->version = get_info->version;
	get_info->version = FW_CDEV_VERSION;
J
Jay Fenlason 已提交
330
	get_info->card = client->device->card->index;
331

332 333
	down_read(&fw_device_rwsem);

334 335 336
	if (get_info->rom != 0) {
		void __user *uptr = u64_to_uptr(get_info->rom);
		size_t want = get_info->rom_length;
337
		size_t have = client->device->config_rom_length * 4;
338

339 340
		ret = copy_to_user(uptr, client->device->config_rom,
				   min(want, have));
341
	}
342
	get_info->rom_length = client->device->config_rom_length * 4;
343

344 345 346 347 348
	up_read(&fw_device_rwsem);

	if (ret != 0)
		return -EFAULT;

349 350 351
	client->bus_reset_closure = get_info->bus_reset_closure;
	if (get_info->bus_reset != 0) {
		void __user *uptr = u64_to_uptr(get_info->bus_reset);
352

353
		fill_bus_reset_event(&bus_reset, client);
354
		if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
355 356
			return -EFAULT;
	}
357 358 359 360

	return 0;
}

361 362
static int add_client_resource(struct client *client,
			       struct client_resource *resource, gfp_t gfp_mask)
363 364
{
	unsigned long flags;
365 366 367 368 369
	int ret;

 retry:
	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
		return -ENOMEM;
370 371

	spin_lock_irqsave(&client->lock, flags);
372 373 374 375 376
	if (client->in_shutdown)
		ret = -ECANCELED;
	else
		ret = idr_get_new(&client->resource_idr, resource,
				  &resource->handle);
377 378
	if (ret >= 0)
		client_get(client);
379
	spin_unlock_irqrestore(&client->lock, flags);
380 381 382 383 384

	if (ret == -EAGAIN)
		goto retry;

	return ret < 0 ? ret : 0;
385 386
}

387 388 389
static int release_client_resource(struct client *client, u32 handle,
				   client_resource_release_fn_t release,
				   struct client_resource **resource)
390 391 392 393 394
{
	struct client_resource *r;
	unsigned long flags;

	spin_lock_irqsave(&client->lock, flags);
395 396 397 398 399 400
	if (client->in_shutdown)
		r = NULL;
	else
		r = idr_find(&client->resource_idr, handle);
	if (r && r->release == release)
		idr_remove(&client->resource_idr, handle);
401 402
	spin_unlock_irqrestore(&client->lock, flags);

403
	if (!(r && r->release == release))
404 405 406 407 408 409 410
		return -EINVAL;

	if (resource)
		*resource = r;
	else
		r->release(client, r);

411 412
	client_put(client);

413 414 415
	return 0;
}

416 417
static void release_transaction(struct client *client,
				struct client_resource *resource)
418
{
419 420
	struct outbound_transaction_resource *r = container_of(resource,
			struct outbound_transaction_resource, resource);
421

422
	fw_cancel_transaction(client->device->card, &r->transaction);
423 424
}

425 426
static void complete_transaction(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
427
{
428 429 430
	struct outbound_transaction_event *e = data;
	struct fw_cdev_event_response *rsp = &e->response;
	struct client *client = e->client;
431
	unsigned long flags;
432

433 434
	if (length < rsp->length)
		rsp->length = length;
435
	if (rcode == RCODE_COMPLETE)
436
		memcpy(rsp->data, payload, rsp->length);
437

438
	spin_lock_irqsave(&client->lock, flags);
439
	/*
440 441 442 443 444 445 446 447
	 * 1. If called while in shutdown, the idr tree must be left untouched.
	 *    The idr handle will be removed and the client reference will be
	 *    dropped later.
	 * 2. If the call chain was release_client_resource ->
	 *    release_transaction -> complete_transaction (instead of a normal
	 *    conclusion of the transaction), i.e. if this resource was already
	 *    unregistered from the idr, the client reference will be dropped
	 *    by release_client_resource and we must not drop it here.
448
	 */
449
	if (!client->in_shutdown &&
450 451
	    idr_find(&client->resource_idr, e->r.resource.handle)) {
		idr_remove(&client->resource_idr, e->r.resource.handle);
452 453 454
		/* Drop the idr's reference */
		client_put(client);
	}
455 456
	spin_unlock_irqrestore(&client->lock, flags);

457 458
	rsp->type = FW_CDEV_EVENT_RESPONSE;
	rsp->rcode = rcode;
459 460

	/*
461
	 * In the case that sizeof(*rsp) doesn't align with the position of the
462 463 464 465 466
	 * data, and the read is short, preserve an extra copy of the data
	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
	 * for short reads and some apps depended on it, this is both safe
	 * and prudent for compatibility.
	 */
467 468 469
	if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
		queue_event(client, &e->event, rsp, sizeof(*rsp),
			    rsp->data, rsp->length);
470
	else
471
		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
472
			    NULL, 0);
473 474 475

	/* Drop the transaction callback's reference */
	client_put(client);
476 477
}

J
Jeff Garzik 已提交
478
static int ioctl_send_request(struct client *client, void *buffer)
479 480
{
	struct fw_device *device = client->device;
481
	struct fw_cdev_send_request *request = buffer;
482
	struct outbound_transaction_event *e;
483
	int ret;
484 485

	/* What is the biggest size we'll accept, really? */
486
	if (request->length > 4096)
487 488
		return -EINVAL;

489 490
	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
	if (e == NULL)
491 492
		return -ENOMEM;

493 494 495
	e->client = client;
	e->response.length = request->length;
	e->response.closure = request->closure;
496

497
	if (request->data &&
498
	    copy_from_user(e->response.data,
499
			   u64_to_uptr(request->data), request->length)) {
500
		ret = -EFAULT;
501
		goto failed;
502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518
	}

	switch (request->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_MASK_SWAP:
	case TCODE_LOCK_COMPARE_SWAP:
	case TCODE_LOCK_FETCH_ADD:
	case TCODE_LOCK_LITTLE_ADD:
	case TCODE_LOCK_BOUNDED_ADD:
	case TCODE_LOCK_WRAP_ADD:
	case TCODE_LOCK_VENDOR_DEPENDENT:
		break;
	default:
		ret = -EINVAL;
519
		goto failed;
520 521
	}

522 523
	e->r.resource.release = release_transaction;
	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
524 525
	if (ret < 0)
		goto failed;
526

527 528 529
	/* Get a reference for the transaction callback */
	client_get(client);

530
	fw_send_request(device->card, &e->r.transaction,
531
			request->tcode & 0x1f,
532
			device->node->node_id,
533
			request->generation,
534
			device->max_speed,
535
			request->offset,
536 537
			e->response.data, request->length,
			complete_transaction, e);
538

539
	if (request->data)
540
		return sizeof(request) + request->length;
541
	else
542
		return sizeof(request);
543
 failed:
544
	kfree(e);
545 546

	return ret;
547 548
}

549 550
static void release_request(struct client *client,
			    struct client_resource *resource)
551
{
552 553
	struct inbound_transaction_resource *r = container_of(resource,
			struct inbound_transaction_resource, resource);
554

555
	fw_send_response(client->device->card, r->request,
556
			 RCODE_CONFLICT_ERROR);
557
	kfree(r);
558 559
}

560
static void handle_request(struct fw_card *card, struct fw_request *request,
561 562 563 564
			   int tcode, int destination, int source,
			   int generation, int speed,
			   unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
565
{
566 567 568
	struct address_handler_resource *handler = callback_data;
	struct inbound_transaction_resource *r;
	struct inbound_transaction_event *e;
569
	int ret;
570

571
	r = kmalloc(sizeof(*r), GFP_ATOMIC);
572
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
573
	if (r == NULL || e == NULL)
574
		goto failed;
575

576 577 578
	r->request = request;
	r->data    = payload;
	r->length  = length;
579

580 581
	r->resource.release = release_request;
	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
582 583
	if (ret < 0)
		goto failed;
584 585 586 587 588

	e->request.type    = FW_CDEV_EVENT_REQUEST;
	e->request.tcode   = tcode;
	e->request.offset  = offset;
	e->request.length  = length;
589
	e->request.handle  = r->resource.handle;
590 591
	e->request.closure = handler->closure;

592
	queue_event(handler->client, &e->event,
593
		    &e->request, sizeof(e->request), payload, length);
594 595 596
	return;

 failed:
597
	kfree(r);
598
	kfree(e);
599
	fw_send_response(card, request, RCODE_CONFLICT_ERROR);
600 601
}

602 603
static void release_address_handler(struct client *client,
				    struct client_resource *resource)
604
{
605 606
	struct address_handler_resource *r =
	    container_of(resource, struct address_handler_resource, resource);
607

608 609
	fw_core_remove_address_handler(&r->handler);
	kfree(r);
610 611
}

612
static int ioctl_allocate(struct client *client, void *buffer)
613
{
614
	struct fw_cdev_allocate *request = buffer;
615
	struct address_handler_resource *r;
616
	struct fw_address_region region;
617
	int ret;
618

619 620
	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (r == NULL)
621 622
		return -ENOMEM;

623 624
	region.start = request->offset;
	region.end = request->offset + request->length;
625 626 627 628 629
	r->handler.length = request->length;
	r->handler.address_callback = handle_request;
	r->handler.callback_data = r;
	r->closure = request->closure;
	r->client = client;
630

631
	ret = fw_core_add_address_handler(&r->handler, &region);
632
	if (ret < 0) {
633
		kfree(r);
634
		return ret;
635 636
	}

637 638
	r->resource.release = release_address_handler;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
639
	if (ret < 0) {
640
		release_address_handler(client, &r->resource);
641 642
		return ret;
	}
643
	request->handle = r->resource.handle;
644 645 646 647

	return 0;
}

648
static int ioctl_deallocate(struct client *client, void *buffer)
649
{
650
	struct fw_cdev_deallocate *request = buffer;
651

652 653
	return release_client_resource(client, request->handle,
				       release_address_handler, NULL);
654 655
}

656
static int ioctl_send_response(struct client *client, void *buffer)
657
{
658
	struct fw_cdev_send_response *request = buffer;
659
	struct client_resource *resource;
660
	struct inbound_transaction_resource *r;
661

662 663
	if (release_client_resource(client, request->handle,
				    release_request, &resource) < 0)
664
		return -EINVAL;
665

666 667
	r = container_of(resource, struct inbound_transaction_resource,
			 resource);
668 669 670
	if (request->length < r->length)
		r->length = request->length;
	if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
671 672
		return -EFAULT;

673
	fw_send_response(client->device->card, r->request, request->rcode);
674 675 676 677 678
	kfree(r);

	return 0;
}

679
static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
680
{
681
	struct fw_cdev_initiate_bus_reset *request = buffer;
682 683
	int short_reset;

684
	short_reset = (request->type == FW_CDEV_SHORT_RESET);
685 686 687 688

	return fw_core_initiate_bus_reset(client->device->card, short_reset);
}

689 690 691
static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
692 693
	struct descriptor_resource *r =
		container_of(resource, struct descriptor_resource, resource);
694

695 696
	fw_core_remove_descriptor(&r->descriptor);
	kfree(r);
697 698
}

699
static int ioctl_add_descriptor(struct client *client, void *buffer)
700
{
701
	struct fw_cdev_add_descriptor *request = buffer;
702
	struct descriptor_resource *r;
703
	int ret;
704

705
	if (request->length > 256)
706 707
		return -EINVAL;

708 709
	r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL);
	if (r == NULL)
710 711
		return -ENOMEM;

712
	if (copy_from_user(r->data,
713
			   u64_to_uptr(request->data), request->length * 4)) {
714 715
		ret = -EFAULT;
		goto failed;
716 717
	}

718 719 720 721
	r->descriptor.length    = request->length;
	r->descriptor.immediate = request->immediate;
	r->descriptor.key       = request->key;
	r->descriptor.data      = r->data;
722

723
	ret = fw_core_add_descriptor(&r->descriptor);
724 725
	if (ret < 0)
		goto failed;
726

727 728
	r->resource.release = release_descriptor;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
729
	if (ret < 0) {
730
		fw_core_remove_descriptor(&r->descriptor);
731 732
		goto failed;
	}
733
	request->handle = r->resource.handle;
734 735

	return 0;
736
 failed:
737
	kfree(r);
738 739

	return ret;
740 741
}

742
static int ioctl_remove_descriptor(struct client *client, void *buffer)
743
{
744
	struct fw_cdev_remove_descriptor *request = buffer;
745

746 747
	return release_client_resource(client, request->handle,
				       release_descriptor, NULL);
748 749
}

750 751
static void iso_callback(struct fw_iso_context *context, u32 cycle,
			 size_t header_length, void *header, void *data)
752 753
{
	struct client *client = data;
754
	struct iso_interrupt_event *e;
755

756 757
	e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC);
	if (e == NULL)
758 759
		return;

760 761 762 763 764 765 766
	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
	e->interrupt.closure   = client->iso_closure;
	e->interrupt.cycle     = cycle;
	e->interrupt.header_length = header_length;
	memcpy(e->interrupt.header, header, header_length);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt) + header_length, NULL, 0);
767 768
}

769
static int ioctl_create_iso_context(struct client *client, void *buffer)
770
{
771
	struct fw_cdev_create_iso_context *request = buffer;
772
	struct fw_iso_context *context;
773

774 775 776 777
	/* We only support one context at this time. */
	if (client->iso_context != NULL)
		return -EBUSY;

778
	if (request->channel > 63)
779 780
		return -EINVAL;

781
	switch (request->type) {
782
	case FW_ISO_CONTEXT_RECEIVE:
783
		if (request->header_size < 4 || (request->header_size & 3))
784
			return -EINVAL;
785

786 787 788
		break;

	case FW_ISO_CONTEXT_TRANSMIT:
789
		if (request->speed > SCODE_3200)
790 791 792 793 794
			return -EINVAL;

		break;

	default:
795
		return -EINVAL;
796 797
	}

798 799 800 801 802 803 804 805 806
	context =  fw_iso_context_create(client->device->card,
					 request->type,
					 request->channel,
					 request->speed,
					 request->header_size,
					 iso_callback, client);
	if (IS_ERR(context))
		return PTR_ERR(context);

807
	client->iso_closure = request->closure;
808
	client->iso_context = context;
809

810 811 812
	/* We only support one context at this time. */
	request->handle = 0;

813 814 815
	return 0;
}

816 817 818 819
/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
820 821
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
822 823
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)

824
static int ioctl_queue_iso(struct client *client, void *buffer)
825
{
826
	struct fw_cdev_queue_iso *request = buffer;
827
	struct fw_cdev_iso_packet __user *p, *end, *next;
828
	struct fw_iso_context *ctx = client->iso_context;
829
	unsigned long payload, buffer_end, header_length;
830
	u32 control;
831 832 833 834 835 836
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

837
	if (ctx == NULL || request->handle != 0)
838 839
		return -EINVAL;

840 841
	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
842 843
	 * the iso buffer, and the pointer points inside the buffer,
	 * we setup the payload pointers accordingly.  Otherwise we
844
	 * set them both to 0, which will still let packets with
845 846
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
847 848
	 * and the request->data pointer is ignored.
	 */
849

850
	payload = (unsigned long)request->data - client->vm_start;
851
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
852
	if (request->data == 0 || client->buffer.pages == NULL ||
853
	    payload >= buffer_end) {
854
		payload = 0;
855
		buffer_end = 0;
856 857
	}

A
Al Viro 已提交
858 859 860
	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);

	if (!access_ok(VERIFY_READ, p, request->size))
861 862
		return -EFAULT;

863
	end = (void __user *)p + request->size;
864 865
	count = 0;
	while (p < end) {
866
		if (get_user(control, &p->control))
867
			return -EFAULT;
868 869 870 871 872 873
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);
874

875
		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
876 877
			header_length = u.packet.header_length;
		} else {
878 879 880 881
			/*
			 * We require that header_length is a multiple of
			 * the fixed header size, ctx->header_size.
			 */
882 883 884 885
			if (ctx->header_size == 0) {
				if (u.packet.header_length > 0)
					return -EINVAL;
			} else if (u.packet.header_length % ctx->header_size != 0) {
886
				return -EINVAL;
887
			}
888 889 890
			header_length = 0;
		}

891
		next = (struct fw_cdev_iso_packet __user *)
892
			&p->header[header_length / 4];
893 894 895
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
896
		    (u.packet.header, p->header, header_length))
897
			return -EFAULT;
898
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
899 900
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
901
		if (payload + u.packet.payload_length > buffer_end)
902 903
			return -EINVAL;

904 905
		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
906 907 908 909 910 911 912
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}

913 914 915
	request->size    -= uptr_to_u64(p) - request->packets;
	request->packets  = uptr_to_u64(p);
	request->data     = client->vm_start + payload;
916 917 918 919

	return count;
}

920
static int ioctl_start_iso(struct client *client, void *buffer)
921
{
922
	struct fw_cdev_start_iso *request = buffer;
923

924
	if (client->iso_context == NULL || request->handle != 0)
925
		return -EINVAL;
926

927
	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
928
		if (request->tags == 0 || request->tags > 15)
929 930
			return -EINVAL;

931
		if (request->sync > 15)
932 933 934
			return -EINVAL;
	}

935 936
	return fw_iso_context_start(client->iso_context, request->cycle,
				    request->sync, request->tags);
937 938
}

939
static int ioctl_stop_iso(struct client *client, void *buffer)
940
{
941 942
	struct fw_cdev_stop_iso *request = buffer;

943
	if (client->iso_context == NULL || request->handle != 0)
944 945
		return -EINVAL;

946 947 948
	return fw_iso_context_stop(client->iso_context);
}

949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970
static int ioctl_get_cycle_timer(struct client *client, void *buffer)
{
	struct fw_cdev_get_cycle_timer *request = buffer;
	struct fw_card *card = client->device->card;
	unsigned long long bus_time;
	struct timeval tv;
	unsigned long flags;

	preempt_disable();
	local_irq_save(flags);

	bus_time = card->driver->get_bus_time(card);
	do_gettimeofday(&tv);

	local_irq_restore(flags);
	preempt_enable();

	request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
	request->cycle_timer = bus_time & 0xffffffff;
	return 0;
}

971 972 973 974 975 976 977 978 979 980 981 982 983
static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
	ioctl_get_info,
	ioctl_send_request,
	ioctl_allocate,
	ioctl_deallocate,
	ioctl_send_response,
	ioctl_initiate_bus_reset,
	ioctl_add_descriptor,
	ioctl_remove_descriptor,
	ioctl_create_iso_context,
	ioctl_queue_iso,
	ioctl_start_iso,
	ioctl_stop_iso,
984
	ioctl_get_cycle_timer,
985 986
};

987 988
static int dispatch_ioctl(struct client *client,
			  unsigned int cmd, void __user *arg)
989
{
990
	char buffer[256];
991
	int ret;
992 993 994

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
995
		return -EINVAL;
996 997

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
998
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
999 1000 1001 1002
		    copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

1003 1004 1005
	ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
	if (ret < 0)
		return ret;
1006 1007

	if (_IOC_DIR(cmd) & _IOC_READ) {
1008
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
1009 1010
		    copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
			return -EFAULT;
1011
	}
1012

1013
	return ret;
1014 1015
}

1016 1017
static long fw_device_op_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg)
1018 1019 1020
{
	struct client *client = file->private_data;

1021 1022 1023
	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

1024 1025 1026 1027
	return dispatch_ioctl(client, cmd, (void __user *) arg);
}

#ifdef CONFIG_COMPAT
1028 1029
static long fw_device_op_compat_ioctl(struct file *file,
				      unsigned int cmd, unsigned long arg)
1030 1031 1032
{
	struct client *client = file->private_data;

1033 1034 1035
	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

1036 1037 1038 1039 1040 1041 1042
	return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
1043 1044
	enum dma_data_direction direction;
	unsigned long size;
1045
	int page_count, ret;
1046

1047 1048 1049
	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

1050 1051 1052 1053 1054 1055
	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
1056

1057
	if (vma->vm_start & ~PAGE_MASK)
1058 1059 1060
		return -EINVAL;

	client->vm_start = vma->vm_start;
1061 1062 1063 1064 1065 1066 1067 1068 1069 1070
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

1071 1072 1073 1074
	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
				 page_count, direction);
	if (ret < 0)
		return ret;
1075

1076 1077
	ret = fw_iso_buffer_map(&client->buffer, vma);
	if (ret < 0)
1078 1079
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

1080
	return ret;
1081 1082
}

1083 1084 1085 1086 1087 1088
static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *r = p;
	struct client *client = data;

	r->release(client, r);
1089
	client_put(client);
1090 1091 1092 1093

	return 0;
}

1094 1095 1096
static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
1097
	struct event *e, *next_e;
1098
	unsigned long flags;
1099

1100 1101 1102 1103
	mutex_lock(&client->device->client_list_mutex);
	list_del(&client->link);
	mutex_unlock(&client->device->client_list_mutex);

1104 1105 1106
	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

1107 1108 1109
	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

1110 1111 1112 1113
	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irqsave(&client->lock, flags);
	client->in_shutdown = true;
	spin_unlock_irqrestore(&client->lock, flags);
1114

1115 1116 1117
	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_remove_all(&client->resource_idr);
	idr_destroy(&client->resource_idr);
1118

1119 1120
	list_for_each_entry_safe(e, next_e, &client->event_list, link)
		kfree(e);
1121

1122
	client_put(client);
1123 1124 1125 1126 1127 1128 1129

	return 0;
}

/*
 * poll() handler: POLLIN/POLLRDNORM when an event is queued,
 * POLLHUP/POLLERR once the device has been shut down.
 */
static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
{
	struct client *client = file->private_data;
	unsigned int events = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		events |= POLLHUP | POLLERR;

	if (!list_empty(&client->event_list))
		events |= POLLIN | POLLRDNORM;

	return events;
}

1142
const struct file_operations fw_device_ops = {
1143 1144 1145 1146 1147 1148 1149 1150 1151
	.owner		= THIS_MODULE,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.poll		= fw_device_op_poll,
	.release	= fw_device_op_release,
	.mmap		= fw_device_op_mmap,

#ifdef CONFIG_COMPAT
1152
	.compat_ioctl	= fw_device_op_compat_ioctl,
1153 1154
#endif
};