/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include <asm/system.h>

#include "core.h"

/*
 * ABI version history is documented in linux/firewire-cdev.h.
 */
#define FW_CDEV_KERNEL_VERSION			4
#define FW_CDEV_VERSION_EVENT_REQUEST2		4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END	4

struct client {
	u32 version;
	struct fw_device *device;

	spinlock_t lock;
	bool in_shutdown;
	struct idr resource_idr;
	struct list_head event_list;
	wait_queue_head_t wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;

	struct list_head phy_receiver_link;
	u64 phy_receiver_closure;

	struct list_head link;
	struct kref kref;
};

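/*
 * The client structure is reference-counted:  the open file holds one
 * reference, and each registered resource, pending transaction
 * callback, and scheduled iso_resource work item takes another via
 * client_get().  client_release() runs when the last one is dropped.
 */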
static inline void client_get(struct client *client)
{
	kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
	struct client *client = container_of(kref, struct client, kref);

	fw_device_put(client->device);
	kfree(client);
}

static void client_put(struct client *client)
{
	kref_put(&client->kref, client_release);
}

struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);
struct client_resource {
	client_resource_release_fn_t release;
	int handle;
};

struct address_handler_resource {
	struct client_resource resource;
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
};

struct outbound_transaction_resource {
	struct client_resource resource;
	struct fw_transaction transaction;
};

struct inbound_transaction_resource {
	struct client_resource resource;
	struct fw_card *card;
	struct fw_request *request;
	void *data;
	size_t length;
};

struct descriptor_resource {
	struct client_resource resource;
	struct fw_descriptor descriptor;
	u32 data[0];
};

struct iso_resource {
	struct client_resource resource;
	struct client *client;
	/* Schedule work and access todo only with client->lock held. */
	struct delayed_work work;
	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
	int generation;
	u64 channels;
	s32 bandwidth;
	__be32 transaction_data[2];
	struct iso_resource_event *e_alloc, *e_dealloc;
};

static void release_iso_resource(struct client *, struct client_resource *);

static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
	client_get(r->client);
	if (!schedule_delayed_work(&r->work, delay))
		client_put(r->client);
}

static void schedule_if_iso_resource(struct client_resource *resource)
{
	if (resource->release == release_iso_resource)
		schedule_iso_resource(container_of(resource,
					struct iso_resource, resource), 0);
}

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct bus_reset_event {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
	struct event event;
	struct client *client;
	struct outbound_transaction_resource r;
	struct fw_cdev_event_response response;
};

struct inbound_transaction_event {
	struct event event;
	union {
		struct fw_cdev_event_request request;
		struct fw_cdev_event_request2 request2;
	} req;
};

struct iso_interrupt_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_resource_event {
	struct event event;
	struct fw_cdev_event_iso_resource iso_resource;
};

struct outbound_phy_packet_event {
	struct event event;
	struct client *client;
	struct fw_packet p;
	struct fw_cdev_event_phy_packet phy_packet;
};

struct inbound_phy_packet_event {
	struct event event;
	struct fw_cdev_event_phy_packet phy_packet;
};

static inline void __user *u64_to_uptr(__u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline __u64 uptr_to_u64(void __user *ptr)
{
	return (__u64)(unsigned long)ptr;
}

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = fw_device_get_by_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);
		return -ENOMEM;
	}

	client->device = device;
	spin_lock_init(&client->lock);
	idr_init(&client->resource_idr);
	INIT_LIST_HEAD(&client->event_list);
	init_waitqueue_head(&client->wait);
	INIT_LIST_HEAD(&client->phy_receiver_link);
	kref_init(&client->kref);

	file->private_data = client;

	mutex_lock(&device->client_list_mutex);
	list_add_tail(&client->link, &device->client_list);
	mutex_unlock(&device->client_list_mutex);

	return nonseekable_open(inode, file);
}

static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		kfree(event);
	else
		list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}

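/*
 * Block until an event is queued (or the device goes away), then copy
 * the event's one or two payload parts to the user buffer and free it.
 * A read shorter than the event silently truncates it.
 */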
static int dequeue_event(struct client *client,
			 char __user *buffer, size_t count)
{
	struct event *event;
	size_t size, total;
	int i, ret;

	ret = wait_event_interruptible(client->wait,
			!list_empty(&client->event_list) ||
			fw_device_is_shutdown(client->device));
	if (ret < 0)
		return ret;

	if (list_empty(&client->event_list) &&
		       fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irq(&client->lock);
	event = list_first_entry(&client->event_list, struct event, link);
	list_del(&event->link);
	spin_unlock_irq(&client->lock);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			ret = -EFAULT;
			goto out;
		}
		total += size;
	}
	ret = total;

 out:
	kfree(event);

	return ret;
}

static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

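/*
 * Snapshot the current topology (generation and node IDs) under
 * card->lock so that the reported event is internally consistent.
 */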
static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
				 struct client *client)
{
	struct fw_card *card = client->device->card;

	spin_lock_irq(&card->lock);

	event->closure	     = client->bus_reset_closure;
	event->type          = FW_CDEV_EVENT_BUS_RESET;
	event->generation    = client->device->generation;
	event->node_id       = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id    = card->bm_node_id;
	event->irm_node_id   = card->irm_node->node_id;
	event->root_node_id  = card->root_node->node_id;

	spin_unlock_irq(&card->lock);
}

static void for_each_client(struct fw_device *device,
			    void (*callback)(struct client *client))
{
	struct client *c;

	mutex_lock(&device->client_list_mutex);
	list_for_each_entry(c, &device->client_list, link)
		callback(c);
	mutex_unlock(&device->client_list_mutex);
}

static int schedule_reallocations(int id, void *p, void *data)
{
	schedule_if_iso_resource(p);

	return 0;
}

static void queue_bus_reset_event(struct client *client)
{
	struct bus_reset_event *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL) {
		fw_notify("Out of memory when allocating event\n");
		return;
	}

	fill_bus_reset_event(&e->reset, client);

	queue_event(client, &e->event,
		    &e->reset, sizeof(e->reset), NULL, 0);

	spin_lock_irq(&client->lock);
	idr_for_each(&client->resource_idr, schedule_reallocations, client);
	spin_unlock_irq(&client->lock);
}

void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}

union ioctl_arg {
	struct fw_cdev_get_info			get_info;
	struct fw_cdev_send_request		send_request;
	struct fw_cdev_allocate			allocate;
	struct fw_cdev_deallocate		deallocate;
	struct fw_cdev_send_response		send_response;
	struct fw_cdev_initiate_bus_reset	initiate_bus_reset;
	struct fw_cdev_add_descriptor		add_descriptor;
	struct fw_cdev_remove_descriptor	remove_descriptor;
	struct fw_cdev_create_iso_context	create_iso_context;
	struct fw_cdev_queue_iso		queue_iso;
	struct fw_cdev_start_iso		start_iso;
	struct fw_cdev_stop_iso			stop_iso;
	struct fw_cdev_get_cycle_timer		get_cycle_timer;
	struct fw_cdev_allocate_iso_resource	allocate_iso_resource;
	struct fw_cdev_send_stream_packet	send_stream_packet;
	struct fw_cdev_get_cycle_timer2		get_cycle_timer2;
	struct fw_cdev_send_phy_packet		send_phy_packet;
	struct fw_cdev_receive_phy_packets	receive_phy_packets;
};

static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_info *a = &arg->get_info;
	struct fw_cdev_event_bus_reset bus_reset;
	unsigned long ret = 0;

	client->version = a->version;
	a->version = FW_CDEV_KERNEL_VERSION;
	a->card = client->device->card->index;

	down_read(&fw_device_rwsem);

	if (a->rom != 0) {
		size_t want = a->rom_length;
		size_t have = client->device->config_rom_length * 4;

		ret = copy_to_user(u64_to_uptr(a->rom),
				   client->device->config_rom, min(want, have));
	}
	a->rom_length = client->device->config_rom_length * 4;

	up_read(&fw_device_rwsem);

	if (ret != 0)
		return -EFAULT;

	client->bus_reset_closure = a->bus_reset_closure;
	if (a->bus_reset != 0) {
		fill_bus_reset_event(&bus_reset, client);
		if (copy_to_user(u64_to_uptr(a->bus_reset),
				 &bus_reset, sizeof(bus_reset)))
			return -EFAULT;
	}

	return 0;
}

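/*
 * A resource's handle is its index in client->resource_idr.
 * idr_pre_get()/idr_get_new() may race with other allocations and
 * return -EAGAIN, hence the retry loop.  Each successfully added
 * resource takes a client reference, dropped again on release.
 */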
static int add_client_resource(struct client *client,
			       struct client_resource *resource, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

 retry:
	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
		return -ENOMEM;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		ret = -ECANCELED;
	else
		ret = idr_get_new(&client->resource_idr, resource,
				  &resource->handle);
	if (ret >= 0) {
		client_get(client);
		schedule_if_iso_resource(resource);
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (ret == -EAGAIN)
		goto retry;

	return ret < 0 ? ret : 0;
}

static int release_client_resource(struct client *client, u32 handle,
				   client_resource_release_fn_t release,
				   struct client_resource **return_resource)
{
	struct client_resource *resource;

	spin_lock_irq(&client->lock);
	if (client->in_shutdown)
		resource = NULL;
	else
		resource = idr_find(&client->resource_idr, handle);
	if (resource && resource->release == release)
		idr_remove(&client->resource_idr, handle);
	spin_unlock_irq(&client->lock);

	if (!(resource && resource->release == release))
		return -EINVAL;

	if (return_resource)
		*return_resource = resource;
	else
		resource->release(client, resource);

	client_put(client);

	return 0;
}

static void release_transaction(struct client *client,
				struct client_resource *resource)
{
	struct outbound_transaction_resource *r = container_of(resource,
			struct outbound_transaction_resource, resource);

	fw_cancel_transaction(client->device->card, &r->transaction);
}

static void complete_transaction(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct outbound_transaction_event *e = data;
	struct fw_cdev_event_response *rsp = &e->response;
	struct client *client = e->client;
	unsigned long flags;

	if (length < rsp->length)
		rsp->length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(rsp->data, payload, rsp->length);

	spin_lock_irqsave(&client->lock, flags);
	/*
	 * 1. If called while in shutdown, the idr tree must be left untouched.
	 *    The idr handle will be removed and the client reference will be
	 *    dropped later.
	 * 2. If the call chain was release_client_resource ->
	 *    release_transaction -> complete_transaction (instead of a normal
	 *    conclusion of the transaction), i.e. if this resource was already
	 *    unregistered from the idr, the client reference will be dropped
	 *    by release_client_resource and we must not drop it here.
	 */
	if (!client->in_shutdown &&
	    idr_find(&client->resource_idr, e->r.resource.handle)) {
		idr_remove(&client->resource_idr, e->r.resource.handle);
		/* Drop the idr's reference */
		client_put(client);
	}
	spin_unlock_irqrestore(&client->lock, flags);

	rsp->type = FW_CDEV_EVENT_RESPONSE;
	rsp->rcode = rcode;

	/*
	 * In the case that sizeof(*rsp) doesn't align with the position of the
	 * data, and the read is short, preserve an extra copy of the data
	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
	 * for short reads and some apps depended on it, this is both safe
	 * and prudent for compatibility.
	 */
	if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
		queue_event(client, &e->event, rsp, sizeof(*rsp),
			    rsp->data, rsp->length);
	else
		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
			    NULL, 0);

	/* Drop the transaction callback's reference */
	client_put(client);
}

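/*
 * Common back end of the send_request, send_broadcast_request, and
 * send_stream_packet ioctls:  validate the length against the
 * speed-dependent payload limit, copy the payload in, register the
 * transaction as a resource, and submit it asynchronously.
 */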
static int init_request(struct client *client,
			struct fw_cdev_send_request *request,
			int destination_id, int speed)
{
	struct outbound_transaction_event *e;
	int ret;

	if (request->tcode != TCODE_STREAM_DATA &&
	    (request->length > 4096 || request->length > 512 << speed))
		return -EIO;

	if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
	    request->length < 4)
		return -EINVAL;

	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	e->client = client;
	e->response.length = request->length;
	e->response.closure = request->closure;

	if (request->data &&
	    copy_from_user(e->response.data,
			   u64_to_uptr(request->data), request->length)) {
		ret = -EFAULT;
		goto failed;
	}

	e->r.resource.release = release_transaction;
	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
	if (ret < 0)
		goto failed;

	/* Get a reference for the transaction callback */
	client_get(client);

	fw_send_request(client->device->card, &e->r.transaction,
			request->tcode, destination_id, request->generation,
			speed, request->offset, e->response.data,
			request->length, complete_transaction, e);
	return 0;

 failed:
	kfree(e);

	return ret;
}

static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
	switch (arg->send_request.tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_MASK_SWAP:
	case TCODE_LOCK_COMPARE_SWAP:
	case TCODE_LOCK_FETCH_ADD:
	case TCODE_LOCK_LITTLE_ADD:
	case TCODE_LOCK_BOUNDED_ADD:
	case TCODE_LOCK_WRAP_ADD:
	case TCODE_LOCK_VENDOR_DEPENDENT:
		break;
	default:
		return -EINVAL;
	}

	return init_request(client, &arg->send_request, client->device->node_id,
			    client->device->max_speed);
}

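/*
 * Requests to the FCP command and response registers arrive with
 * request == NULL:  the core completes such transactions itself, so
 * only a copy of the payload reaches userspace and no response can
 * be sent through this handle.
 */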
static inline bool is_fcp_request(struct fw_request *request)
{
	return request == NULL;
}

static void release_request(struct client *client,
			    struct client_resource *resource)
{
	struct inbound_transaction_resource *r = container_of(resource,
			struct inbound_transaction_resource, resource);

	if (is_fcp_request(r->request))
		kfree(r->data);
	else
		fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);

	fw_card_put(r->card);
	kfree(r);
}

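/*
 * Called by the core for each inbound request that falls into one of
 * this client's allocated address ranges.  The request is parked as
 * an inbound_transaction_resource until userspace answers it via the
 * send_response ioctl (or closes the file).
 */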
static void handle_request(struct fw_card *card, struct fw_request *request,
			   int tcode, int destination, int source,
			   int generation, unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
{
	struct address_handler_resource *handler = callback_data;
	struct inbound_transaction_resource *r;
	struct inbound_transaction_event *e;
	size_t event_size0;
	void *fcp_frame = NULL;
	int ret;

	/* card may be different from handler->client->device->card */
	fw_card_get(card);

	r = kmalloc(sizeof(*r), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (r == NULL || e == NULL) {
		fw_notify("Out of memory when allocating event\n");
		goto failed;
	}
	r->card    = card;
	r->request = request;
	r->data    = payload;
	r->length  = length;

	if (is_fcp_request(request)) {
		/*
		 * FIXME: Let core-transaction.c manage a
		 * single reference-counted copy?
		 */
		fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
		if (fcp_frame == NULL)
			goto failed;

		r->data = fcp_frame;
	}

	r->resource.release = release_request;
	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
	if (ret < 0)
		goto failed;

	if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
		struct fw_cdev_event_request *req = &e->req.request;

		if (tcode & 0x10)
			tcode = TCODE_LOCK_REQUEST;

		req->type	= FW_CDEV_EVENT_REQUEST;
		req->tcode	= tcode;
		req->offset	= offset;
		req->length	= length;
		req->handle	= r->resource.handle;
		req->closure	= handler->closure;
		event_size0	= sizeof(*req);
	} else {
		struct fw_cdev_event_request2 *req = &e->req.request2;

		req->type	= FW_CDEV_EVENT_REQUEST2;
		req->tcode	= tcode;
		req->offset	= offset;
		req->source_node_id = source;
		req->destination_node_id = destination;
		req->card	= card->index;
		req->generation	= generation;
		req->length	= length;
		req->handle	= r->resource.handle;
		req->closure	= handler->closure;
		event_size0	= sizeof(*req);
	}

	queue_event(handler->client, &e->event,
		    &e->req, event_size0, r->data, length);
	return;

 failed:
	kfree(r);
	kfree(e);
	kfree(fcp_frame);

	if (!is_fcp_request(request))
		fw_send_response(card, request, RCODE_CONFLICT_ERROR);

	fw_card_put(card);
}

static void release_address_handler(struct client *client,
				    struct client_resource *resource)
{
	struct address_handler_resource *r =
	    container_of(resource, struct address_handler_resource, resource);

	fw_core_remove_address_handler(&r->handler);
	kfree(r);
}

static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_allocate *a = &arg->allocate;
	struct address_handler_resource *r;
	struct fw_address_region region;
	int ret;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	region.start = a->offset;
	if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
		region.end = a->offset + a->length;
	else
		region.end = a->region_end;

	r->handler.length           = a->length;
	r->handler.address_callback = handle_request;
	r->handler.callback_data    = r;
	r->closure   = a->closure;
	r->client    = client;

	ret = fw_core_add_address_handler(&r->handler, &region);
	if (ret < 0) {
		kfree(r);
		return ret;
	}
	a->offset = r->handler.offset;

	r->resource.release = release_address_handler;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &r->resource);
		return ret;
	}
	a->handle = r->resource.handle;

	return 0;
}

static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->deallocate.handle,
				       release_address_handler, NULL);
}

static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_response *a = &arg->send_response;
	struct client_resource *resource;
	struct inbound_transaction_resource *r;
	int ret = 0;

	if (release_client_resource(client, a->handle,
				    release_request, &resource) < 0)
		return -EINVAL;

	r = container_of(resource, struct inbound_transaction_resource,
			 resource);
	if (is_fcp_request(r->request))
		goto out;

	if (a->length != fw_get_response_length(r->request)) {
		ret = -EINVAL;
		kfree(r->request);
		goto out;
	}
	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
		ret = -EFAULT;
		kfree(r->request);
		goto out;
	}
	fw_send_response(r->card, r->request, a->rcode);
 out:
	fw_card_put(r->card);
	kfree(r);

	return ret;
}

static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
	fw_schedule_bus_reset(client->device->card, true,
			arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
	return 0;
}

static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor_resource *r =
		container_of(resource, struct descriptor_resource, resource);

	fw_core_remove_descriptor(&r->descriptor);
	kfree(r);
}

static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
	struct descriptor_resource *r;
	int ret;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	if (a->length > 256)
		return -EINVAL;

	r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
		ret = -EFAULT;
		goto failed;
	}

	r->descriptor.length    = a->length;
	r->descriptor.immediate = a->immediate;
	r->descriptor.key       = a->key;
	r->descriptor.data      = r->data;

	ret = fw_core_add_descriptor(&r->descriptor);
	if (ret < 0)
		goto failed;

	r->resource.release = release_descriptor;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&r->descriptor);
		goto failed;
	}
	a->handle = r->resource.handle;

	return 0;
 failed:
	kfree(r);

	return ret;
}

static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->remove_descriptor.handle,
				       release_descriptor, NULL);
}

static void iso_callback(struct fw_iso_context *context, u32 cycle,
			 size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt_event *e;

	e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
	if (e == NULL) {
		fw_notify("Out of memory when allocating event\n");
		return;
	}
	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
	e->interrupt.closure   = client->iso_closure;
	e->interrupt.cycle     = cycle;
	e->interrupt.header_length = header_length;
	memcpy(e->interrupt.header, header, header_length);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt) + header_length, NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
	struct fw_iso_context *context;

	BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE  != FW_ISO_CONTEXT_RECEIVE);

	if (a->channel > 63)
		return -EINVAL;

	switch (a->type) {
	case FW_ISO_CONTEXT_RECEIVE:
		if (a->header_size < 4 || (a->header_size & 3))
			return -EINVAL;
		break;

	case FW_ISO_CONTEXT_TRANSMIT:
		if (a->speed > SCODE_3200)
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	context = fw_iso_context_create(client->device->card, a->type,
					a->channel, a->speed, a->header_size,
					iso_callback, client);
	if (IS_ERR(context))
		return PTR_ERR(context);

	/* We only support one context at this time. */
	spin_lock_irq(&client->lock);
	if (client->iso_context != NULL) {
		spin_unlock_irq(&client->lock);
		fw_iso_context_destroy(context);
		return -EBUSY;
	}
	client->iso_closure = a->closure;
	client->iso_context = context;
	spin_unlock_irq(&client->lock);

	a->handle = 0;

	return 0;
}

/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)

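/*
 * Userspace passes a packed array of struct fw_cdev_iso_packet whose
 * trailing header[] is variable-length, so each packet's size depends
 * on its control word.  The loop below validates and queues them one
 * by one and stops early when the context cannot take more.
 */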
static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_queue_iso *a = &arg->queue_iso;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, header_length;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we setup the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the a->data pointer is ignored.
	 */

	payload = (unsigned long)a->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (a->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);

	if (!access_ok(VERIFY_READ, p, a->size))
		return -EFAULT;

	end = (void __user *)p + a->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
			if (u.packet.header_length % 4 != 0)
				return -EINVAL;
			header_length = u.packet.header_length;
		} else {
			/*
			 * We require that header_length is a multiple of
			 * the fixed header size, ctx->header_size.
			 */
			if (u.packet.header_length == 0 ||
			    u.packet.header_length % ctx->header_size != 0)
				return -EINVAL;
			header_length = 0;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[header_length / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
		    (u.packet.header, p->header, header_length))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}

	a->size    -= uptr_to_u64(p) - a->packets;
	a->packets  = uptr_to_u64(p);
	a->data     = client->vm_start + payload;

	return count;
}

static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_start_iso *a = &arg->start_iso;

	BUILD_BUG_ON(
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
	    (a->tags == 0 || a->tags > 15 || a->sync > 15))
		return -EINVAL;

	return fw_iso_context_start(client->iso_context,
				    a->cycle, a->sync, a->tags);
}

static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_stop_iso *a = &arg->stop_iso;

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}

static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
	struct fw_card *card = client->device->card;
	struct timespec ts = {0, 0};
	u32 cycle_time;
	int ret = 0;

	local_irq_disable();

	cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);

	switch (a->clk_id) {
	case CLOCK_REALTIME:      getnstimeofday(&ts);                   break;
	case CLOCK_MONOTONIC:     do_posix_clock_monotonic_gettime(&ts); break;
	case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts);                  break;
	default:
		ret = -EINVAL;
	}

	local_irq_enable();

	a->tv_sec      = ts.tv_sec;
	a->tv_nsec     = ts.tv_nsec;
	a->cycle_timer = cycle_time;

	return ret;
}

static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
	struct fw_cdev_get_cycle_timer2 ct2;

	ct2.clk_id = CLOCK_REALTIME;
	ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);

	a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
	a->cycle_timer = ct2.cycle_timer;

	return 0;
}

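/*
 * r->todo encodes a small state machine:  ISO_RES_ALLOC allocates
 * and, on success, becomes ISO_RES_REALLOC so that the resource is
 * claimed again after each bus reset.  ISO_RES_DEALLOC and the
 * *_ONCE variants perform a single action and then free everything.
 */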
static void iso_resource_work(struct work_struct *work)
{
	struct iso_resource_event *e;
	struct iso_resource *r =
			container_of(work, struct iso_resource, work.work);
	struct client *client = r->client;
	int generation, channel, bandwidth, todo;
	bool skip, free, success;

	spin_lock_irq(&client->lock);
	generation = client->device->generation;
	todo = r->todo;
	/* Allow 1000ms grace period for other reallocations. */
	if (todo == ISO_RES_ALLOC &&
	    time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
		schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
		skip = true;
	} else {
		/* We could be called twice within the same generation. */
		skip = todo == ISO_RES_REALLOC &&
		       r->generation == generation;
	}
	free = todo == ISO_RES_DEALLOC ||
	       todo == ISO_RES_ALLOC_ONCE ||
	       todo == ISO_RES_DEALLOC_ONCE;
	r->generation = generation;
	spin_unlock_irq(&client->lock);

	if (skip)
		goto out;

	bandwidth = r->bandwidth;

	fw_iso_resource_manage(client->device->card, generation,
			r->channels, &channel, &bandwidth,
			todo == ISO_RES_ALLOC ||
			todo == ISO_RES_REALLOC ||
			todo == ISO_RES_ALLOC_ONCE,
			r->transaction_data);
	/*
	 * Is this generation outdated already?  As long as this resource sticks
	 * in the idr, it will be scheduled again for a newer generation or at
	 * shutdown.
	 */
	if (channel == -EAGAIN &&
	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
		goto out;

	success = channel >= 0 || bandwidth > 0;

	spin_lock_irq(&client->lock);
	/*
	 * Transit from allocation to reallocation, except if the client
	 * requested deallocation in the meantime.
	 */
	if (r->todo == ISO_RES_ALLOC)
		r->todo = ISO_RES_REALLOC;
	/*
	 * Allocation or reallocation failure?  Pull this resource out of the
	 * idr and prepare for deletion, unless the client is shutting down.
	 */
	if (r->todo == ISO_RES_REALLOC && !success &&
	    !client->in_shutdown &&
	    idr_find(&client->resource_idr, r->resource.handle)) {
		idr_remove(&client->resource_idr, r->resource.handle);
		client_put(client);
		free = true;
	}
	spin_unlock_irq(&client->lock);

	if (todo == ISO_RES_ALLOC && channel >= 0)
		r->channels = 1ULL << channel;

	if (todo == ISO_RES_REALLOC && success)
		goto out;

	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
		e = r->e_alloc;
		r->e_alloc = NULL;
	} else {
		e = r->e_dealloc;
		r->e_dealloc = NULL;
	}
	e->iso_resource.handle    = r->resource.handle;
	e->iso_resource.channel   = channel;
	e->iso_resource.bandwidth = bandwidth;

	queue_event(client, &e->event,
		    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);

	if (free) {
		cancel_delayed_work(&r->work);
		kfree(r->e_alloc);
		kfree(r->e_dealloc);
		kfree(r);
	}
 out:
	client_put(client);
}

static void release_iso_resource(struct client *client,
				 struct client_resource *resource)
{
	struct iso_resource *r =
		container_of(resource, struct iso_resource, resource);

	spin_lock_irq(&client->lock);
	r->todo = ISO_RES_DEALLOC;
	schedule_iso_resource(r, 0);
	spin_unlock_irq(&client->lock);
}

static int init_iso_resource(struct client *client,
		struct fw_cdev_allocate_iso_resource *request, int todo)
{
	struct iso_resource_event *e1, *e2;
	struct iso_resource *r;
	int ret;

	if ((request->channels == 0 && request->bandwidth == 0) ||
	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
	    request->bandwidth < 0)
		return -EINVAL;

	r  = kmalloc(sizeof(*r), GFP_KERNEL);
	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
	if (r == NULL || e1 == NULL || e2 == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_DELAYED_WORK(&r->work, iso_resource_work);
	r->client	= client;
	r->todo		= todo;
	r->generation	= -1;
	r->channels	= request->channels;
	r->bandwidth	= request->bandwidth;
	r->e_alloc	= e1;
	r->e_dealloc	= e2;

	e1->iso_resource.closure = request->closure;
	e1->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
	e2->iso_resource.closure = request->closure;
	e2->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

	if (todo == ISO_RES_ALLOC) {
		r->resource.release = release_iso_resource;
		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
		if (ret < 0)
			goto fail;
	} else {
		r->resource.release = NULL;
		r->resource.handle = -1;
		schedule_iso_resource(r, 0);
	}
	request->handle = r->resource.handle;

	return 0;
 fail:
	kfree(r);
	kfree(e1);
	kfree(e2);

	return ret;
}

static int ioctl_allocate_iso_resource(struct client *client,
				       union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC);
}

static int ioctl_deallocate_iso_resource(struct client *client,
					 union ioctl_arg *arg)
{
	return release_client_resource(client,
			arg->deallocate.handle, release_iso_resource, NULL);
}

static int ioctl_allocate_iso_resource_once(struct client *client,
					    union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}

static int ioctl_deallocate_iso_resource_once(struct client *client,
					      union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}

/*
 * Returns a speed code:  Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
	return client->device->max_speed;
}

static int ioctl_send_broadcast_request(struct client *client,
					union ioctl_arg *arg)
{
	struct fw_cdev_send_request *a = &arg->send_request;

	switch (a->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		break;
	default:
		return -EINVAL;
	}

	/* Security policy: Only allow accesses to Units Space. */
	if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
		return -EACCES;

	return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}

static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
	struct fw_cdev_send_request request;
	int dest;

	if (a->speed > client->device->card->link_speed ||
	    a->length > 1024 << a->speed)
		return -EIO;

	if (a->tag > 3 || a->channel > 63 || a->sy > 15)
		return -EINVAL;

	dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
	request.tcode		= TCODE_STREAM_DATA;
	request.length		= a->length;
	request.closure		= a->closure;
	request.data		= a->data;
	request.generation	= a->generation;

	return init_request(client, &request, dest, a->speed);
}

static void outbound_phy_packet_callback(struct fw_packet *packet,
					 struct fw_card *card, int status)
{
	struct outbound_phy_packet_event *e =
		container_of(packet, struct outbound_phy_packet_event, p);

	switch (status) {
	/* expected: */
	case ACK_COMPLETE:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
	/* should never happen with PHY packets: */
	case ACK_PENDING:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:	e->phy_packet.rcode = RCODE_BUSY;	break;
	case ACK_DATA_ERROR:	e->phy_packet.rcode = RCODE_DATA_ERROR;	break;
	case ACK_TYPE_ERROR:	e->phy_packet.rcode = RCODE_TYPE_ERROR;	break;
	/* stale generation; cancelled; on certain controllers: no ack */
	default:		e->phy_packet.rcode = status;		break;
	}
	e->phy_packet.data[0] = packet->timestamp;

	queue_event(e->client, &e->event, &e->phy_packet,
		    sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
	client_put(e->client);
}

static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
	struct fw_card *card = client->device->card;
	struct outbound_phy_packet_event *e;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	client_get(client);
	e->client		= client;
	e->p.speed		= SCODE_100;
	e->p.generation		= a->generation;
	e->p.header[0]		= a->data[0];
	e->p.header[1]		= a->data[1];
	e->p.header_length	= 8;
	e->p.callback		= outbound_phy_packet_callback;
	e->phy_packet.closure	= a->closure;
	e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_SENT;
	if (is_ping_packet(a->data))
		e->phy_packet.length = 4;

	card->driver->send_request(card, &e->p);

	return 0;
}

static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
	struct fw_card *card = client->device->card;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	spin_lock_irq(&card->lock);

	list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
	client->phy_receiver_closure = a->closure;

	spin_unlock_irq(&card->lock);

	return 0;
}

void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
	struct client *client;
	struct inbound_phy_packet_event *e;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
		e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
		if (e == NULL) {
			fw_notify("Out of memory when allocating event\n");
			break;
		}
		e->phy_packet.closure	= client->phy_receiver_closure;
		e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
		e->phy_packet.rcode	= RCODE_COMPLETE;
		e->phy_packet.length	= 8;
		e->phy_packet.data[0]	= p->header[1];
		e->phy_packet.data[1]	= p->header[2];
		queue_event(client, &e->event,
			    &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
	}

	spin_unlock_irqrestore(&card->lock, flags);
}

static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
	[0x00] = ioctl_get_info,
	[0x01] = ioctl_send_request,
	[0x02] = ioctl_allocate,
	[0x03] = ioctl_deallocate,
	[0x04] = ioctl_send_response,
	[0x05] = ioctl_initiate_bus_reset,
	[0x06] = ioctl_add_descriptor,
	[0x07] = ioctl_remove_descriptor,
	[0x08] = ioctl_create_iso_context,
	[0x09] = ioctl_queue_iso,
	[0x0a] = ioctl_start_iso,
	[0x0b] = ioctl_stop_iso,
	[0x0c] = ioctl_get_cycle_timer,
	[0x0d] = ioctl_allocate_iso_resource,
	[0x0e] = ioctl_deallocate_iso_resource,
	[0x0f] = ioctl_allocate_iso_resource_once,
	[0x10] = ioctl_deallocate_iso_resource_once,
	[0x11] = ioctl_get_speed,
	[0x12] = ioctl_send_broadcast_request,
	[0x13] = ioctl_send_stream_packet,
	[0x14] = ioctl_get_cycle_timer2,
	[0x15] = ioctl_send_phy_packet,
	[0x16] = ioctl_receive_phy_packets,
};

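/*
 * All firewire-cdev ioctls share the '#' magic number.  _IOC_NR(cmd)
 * indexes ioctl_handlers[] above, while _IOC_SIZE() and _IOC_DIR()
 * determine how much of the argument is copied in before and copied
 * back out after the handler runs.
 */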
static int dispatch_ioctl(struct client *client,
			  unsigned int cmd, void __user *arg)
{
	union ioctl_arg buffer;
	int ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
	    _IOC_SIZE(cmd) > sizeof(buffer))
		return -EINVAL;

	if (_IOC_DIR(cmd) == _IOC_READ)
		memset(&buffer, 0, _IOC_SIZE(cmd));

	if (_IOC_DIR(cmd) & _IOC_WRITE)
		if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ)
		if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
			return -EFAULT;

	return ret;
}

static long fw_device_op_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
				      unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
				 page_count, direction);
	if (ret < 0)
		return ret;

	ret = fw_iso_buffer_map(&client->buffer, vma);
	if (ret < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return ret;
}

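/* Called via idr_for_each() at release time to tear down leftovers. */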
static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;
	struct client *client = data;

	resource->release(client, resource);
	client_put(client);

	return 0;
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *event, *next_event;

	spin_lock_irq(&client->device->card->lock);
	list_del(&client->phy_receiver_link);
	spin_unlock_irq(&client->device->card->lock);

	mutex_lock(&client->device->client_list_mutex);
	list_del(&client->link);
	mutex_unlock(&client->device->client_list_mutex);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irq(&client->lock);
	client->in_shutdown = true;
	spin_unlock_irq(&client->lock);

	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_remove_all(&client->resource_idr);
	idr_destroy(&client->resource_idr);

	list_for_each_entry_safe(event, next_event, &client->event_list, link)
		kfree(event);

	client_put(client);

	return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
{
	struct client *client = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= POLLHUP | POLLERR;
	if (!list_empty(&client->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.mmap		= fw_device_op_mmap,
	.release	= fw_device_op_release,
	.poll		= fw_device_op_poll,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};