/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include <asm/system.h>

#include "core.h"

/*
 * ABI version history is documented in linux/firewire-cdev.h.
 */
#define FW_CDEV_KERNEL_VERSION			4
#define FW_CDEV_VERSION_EVENT_REQUEST2		4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END	4
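
/*
 * client->version is set from FW_CDEV_IOC_GET_INFO and selects between
 * old and new event formats at runtime, e.g. struct fw_cdev_event_request
 * vs. struct fw_cdev_event_request2 in handle_request() below.
 *
 * Illustrative userspace sketch; the authoritative ABI documentation is
 * in linux/firewire-cdev.h:
 *
 *	fd = open("/dev/fw0", O_RDWR);
 *	info.version = 4;			(ABI version the client implements)
 *	ioctl(fd, FW_CDEV_IOC_GET_INFO, &info);
 *	read(fd, buf, sizeof(buf));		(blocks until an event arrives)
 */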

struct client {
	u32 version;
	struct fw_device *device;

	spinlock_t lock;
	bool in_shutdown;
	struct idr resource_idr;
	struct list_head event_list;
	wait_queue_head_t wait;
	wait_queue_head_t tx_flush_wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;

	struct list_head phy_receiver_link;
	u64 phy_receiver_closure;

	struct list_head link;
	struct kref kref;
};

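/*
 * A client may outlive its file:  besides the file itself, every entry in
 * resource_idr and every scheduled iso_resource work item holds a
 * reference, so the final client_put() may run from the workqueue well
 * after release().
 */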
static inline void client_get(struct client *client)
{
	kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
	struct client *client = container_of(kref, struct client, kref);

	fw_device_put(client->device);
	kfree(client);
}

static void client_put(struct client *client)
{
	kref_put(&client->kref, client_release);
}

struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);
struct client_resource {
	client_resource_release_fn_t release;
	int handle;
};

struct address_handler_resource {
	struct client_resource resource;
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
};

struct outbound_transaction_resource {
	struct client_resource resource;
	struct fw_transaction transaction;
};

struct inbound_transaction_resource {
	struct client_resource resource;
	struct fw_card *card;
	struct fw_request *request;
	void *data;
	size_t length;
};

struct descriptor_resource {
	struct client_resource resource;
	struct fw_descriptor descriptor;
	u32 data[0];
};

struct iso_resource {
	struct client_resource resource;
	struct client *client;
	/* Schedule work and access todo only with client->lock held. */
	struct delayed_work work;
	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
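	/*
	 * ALLOC transits to REALLOC after the first allocation attempt (the
	 * resource is then reallocated after every bus reset) and to DEALLOC
	 * when the client releases it.  The _ONCE states perform a single
	 * (de)allocation that is not tracked across bus resets.
	 */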
	int generation;
	u64 channels;
	s32 bandwidth;
	struct iso_resource_event *e_alloc, *e_dealloc;
};

static void release_iso_resource(struct client *, struct client_resource *);

static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
	client_get(r->client);
	if (!queue_delayed_work(fw_workqueue, &r->work, delay))
		client_put(r->client);
}

static void schedule_if_iso_resource(struct client_resource *resource)
{
	if (resource->release == release_iso_resource)
		schedule_iso_resource(container_of(resource,
					struct iso_resource, resource), 0);
}

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct bus_reset_event {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
	struct event event;
	struct client *client;
	struct outbound_transaction_resource r;
	struct fw_cdev_event_response response;
};

struct inbound_transaction_event {
	struct event event;
	union {
		struct fw_cdev_event_request request;
		struct fw_cdev_event_request2 request2;
	} req;
};

struct iso_interrupt_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_interrupt_mc_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt_mc interrupt;
};

struct iso_resource_event {
	struct event event;
	struct fw_cdev_event_iso_resource iso_resource;
};

struct outbound_phy_packet_event {
	struct event event;
	struct client *client;
	struct fw_packet p;
	struct fw_cdev_event_phy_packet phy_packet;
};

struct inbound_phy_packet_event {
	struct event event;
	struct fw_cdev_event_phy_packet phy_packet;
};

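/*
 * Pointer fields in the ioctl structs are u64.  A 32-bit task need not
 * zero-extend what it stores there, so the upper half must be ignored
 * via compat_ptr()/ptr_to_compat() when the caller is a compat task.
 */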
#ifdef CONFIG_COMPAT
static void __user *u64_to_uptr(u64 value)
{
	if (is_compat_task())
		return compat_ptr(value);
	else
		return (void __user *)(unsigned long)value;
}

static u64 uptr_to_u64(void __user *ptr)
{
	if (is_compat_task())
		return ptr_to_compat(ptr);
	else
		return (u64)(unsigned long)ptr;
}
#else
static inline void __user *u64_to_uptr(u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline u64 uptr_to_u64(void __user *ptr)
{
	return (u64)(unsigned long)ptr;
}
#endif /* CONFIG_COMPAT */

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = fw_device_get_by_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);
		return -ENOMEM;
	}

	client->device = device;
	spin_lock_init(&client->lock);
	idr_init(&client->resource_idr);
	INIT_LIST_HEAD(&client->event_list);
	init_waitqueue_head(&client->wait);
	init_waitqueue_head(&client->tx_flush_wait);
	INIT_LIST_HEAD(&client->phy_receiver_link);
	INIT_LIST_HEAD(&client->link);
	kref_init(&client->kref);

	file->private_data = client;

	return nonseekable_open(inode, file);
}

static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		kfree(event);
	else
		list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}

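/*
 * Each read() dequeues at most one event.  An event is copied out in up
 * to two segments:  v[0] is the fixed-size event struct, v[1] an optional
 * payload (e.g. request data); both are truncated to the read size.
 */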
static int dequeue_event(struct client *client,
			 char __user *buffer, size_t count)
{
	struct event *event;
	size_t size, total;
	int i, ret;

	ret = wait_event_interruptible(client->wait,
			!list_empty(&client->event_list) ||
			fw_device_is_shutdown(client->device));
	if (ret < 0)
		return ret;

	if (list_empty(&client->event_list) &&
		       fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irq(&client->lock);
	event = list_first_entry(&client->event_list, struct event, link);
	list_del(&event->link);
	spin_unlock_irq(&client->lock);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			ret = -EFAULT;
			goto out;
		}
		total += size;
	}
	ret = total;

 out:
	kfree(event);

	return ret;
}

static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
				 struct client *client)
{
	struct fw_card *card = client->device->card;

	spin_lock_irq(&card->lock);

	event->closure	     = client->bus_reset_closure;
	event->type          = FW_CDEV_EVENT_BUS_RESET;
	event->generation    = client->device->generation;
	event->node_id       = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id    = card->bm_node_id;
	event->irm_node_id   = card->irm_node->node_id;
	event->root_node_id  = card->root_node->node_id;

	spin_unlock_irq(&card->lock);
}

static void for_each_client(struct fw_device *device,
			    void (*callback)(struct client *client))
{
	struct client *c;

	mutex_lock(&device->client_list_mutex);
	list_for_each_entry(c, &device->client_list, link)
		callback(c);
	mutex_unlock(&device->client_list_mutex);
}

static int schedule_reallocations(int id, void *p, void *data)
{
	schedule_if_iso_resource(p);

	return 0;
}

386
static void queue_bus_reset_event(struct client *client)
387
{
388
	struct bus_reset_event *e;
389

390 391
	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL) {
392
		fw_notice(client->device->card, "out of memory when allocating event\n");
393 394 395
		return;
	}

396
	fill_bus_reset_event(&e->reset, client);
397

398 399
	queue_event(client, &e->event,
		    &e->reset, sizeof(e->reset), NULL, 0);
400 401 402 403

	spin_lock_irq(&client->lock);
	idr_for_each(&client->resource_idr, schedule_reallocations, client);
	spin_unlock_irq(&client->lock);
404 405 406 407
}

void fw_device_cdev_update(struct fw_device *device)
{
408 409
	for_each_client(device, queue_bus_reset_event);
}
410

411 412 413 414
static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}
415

416 417 418
void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
419 420
}

421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437
union ioctl_arg {
	struct fw_cdev_get_info			get_info;
	struct fw_cdev_send_request		send_request;
	struct fw_cdev_allocate			allocate;
	struct fw_cdev_deallocate		deallocate;
	struct fw_cdev_send_response		send_response;
	struct fw_cdev_initiate_bus_reset	initiate_bus_reset;
	struct fw_cdev_add_descriptor		add_descriptor;
	struct fw_cdev_remove_descriptor	remove_descriptor;
	struct fw_cdev_create_iso_context	create_iso_context;
	struct fw_cdev_queue_iso		queue_iso;
	struct fw_cdev_start_iso		start_iso;
	struct fw_cdev_stop_iso			stop_iso;
	struct fw_cdev_get_cycle_timer		get_cycle_timer;
	struct fw_cdev_allocate_iso_resource	allocate_iso_resource;
	struct fw_cdev_send_stream_packet	send_stream_packet;
	struct fw_cdev_get_cycle_timer2		get_cycle_timer2;
438
	struct fw_cdev_send_phy_packet		send_phy_packet;
439
	struct fw_cdev_receive_phy_packets	receive_phy_packets;
440
	struct fw_cdev_set_iso_channels		set_iso_channels;
441 442 443
};

static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
444
{
445
	struct fw_cdev_get_info *a = &arg->get_info;
446
	struct fw_cdev_event_bus_reset bus_reset;
447
	unsigned long ret = 0;
448

449
	client->version = a->version;
450
	a->version = FW_CDEV_KERNEL_VERSION;
451
	a->card = client->device->card->index;
452

453 454
	down_read(&fw_device_rwsem);

455 456
	if (a->rom != 0) {
		size_t want = a->rom_length;
457
		size_t have = client->device->config_rom_length * 4;
458

459 460
		ret = copy_to_user(u64_to_uptr(a->rom),
				   client->device->config_rom, min(want, have));
461
	}
462
	a->rom_length = client->device->config_rom_length * 4;
463

464 465 466 467 468
	up_read(&fw_device_rwsem);

	if (ret != 0)
		return -EFAULT;

	mutex_lock(&client->device->client_list_mutex);

	client->bus_reset_closure = a->bus_reset_closure;
	if (a->bus_reset != 0) {
		fill_bus_reset_event(&bus_reset, client);
		ret = copy_to_user(u64_to_uptr(a->bus_reset),
				   &bus_reset, sizeof(bus_reset));
	}
	if (ret == 0 && list_empty(&client->link))
		list_add_tail(&client->link, &client->device->client_list);

	mutex_unlock(&client->device->client_list_mutex);

	return ret ? -EFAULT : 0;
}

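/*
 * Older idr API:  preallocate with idr_pre_get() outside the lock, then
 * allocate under client->lock and retry on -EAGAIN in case a concurrent
 * caller consumed the preallocated node.  A successful allocation takes
 * a client reference on behalf of the new resource.
 */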
static int add_client_resource(struct client *client,
			       struct client_resource *resource, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

 retry:
	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
		return -ENOMEM;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		ret = -ECANCELED;
	else
		ret = idr_get_new(&client->resource_idr, resource,
				  &resource->handle);
	if (ret >= 0) {
		client_get(client);
		schedule_if_iso_resource(resource);
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (ret == -EAGAIN)
		goto retry;

	return ret < 0 ? ret : 0;
}

static int release_client_resource(struct client *client, u32 handle,
				   client_resource_release_fn_t release,
				   struct client_resource **return_resource)
{
	struct client_resource *resource;

	spin_lock_irq(&client->lock);
	if (client->in_shutdown)
		resource = NULL;
	else
		resource = idr_find(&client->resource_idr, handle);
	if (resource && resource->release == release)
		idr_remove(&client->resource_idr, handle);
	spin_unlock_irq(&client->lock);

	if (!(resource && resource->release == release))
		return -EINVAL;

	if (return_resource)
		*return_resource = resource;
	else
		resource->release(client, resource);

	client_put(client);

	return 0;
}

static void release_transaction(struct client *client,
				struct client_resource *resource)
{
}

static void complete_transaction(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct outbound_transaction_event *e = data;
	struct fw_cdev_event_response *rsp = &e->response;
	struct client *client = e->client;
	unsigned long flags;

	if (length < rsp->length)
		rsp->length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(rsp->data, payload, rsp->length);

	spin_lock_irqsave(&client->lock, flags);
	idr_remove(&client->resource_idr, e->r.resource.handle);
	if (client->in_shutdown)
		wake_up(&client->tx_flush_wait);
	spin_unlock_irqrestore(&client->lock, flags);

	rsp->type = FW_CDEV_EVENT_RESPONSE;
	rsp->rcode = rcode;

	/*
	 * In the case that sizeof(*rsp) doesn't align with the position of the
	 * data, and the read is short, preserve an extra copy of the data
	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
	 * for short reads and some apps depended on it, this is both safe
	 * and prudent for compatibility.
	 */
	if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
		queue_event(client, &e->event, rsp, sizeof(*rsp),
			    rsp->data, rsp->length);
	else
		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
			    NULL, 0);

	/* Drop the idr's reference */
	client_put(client);
}

static int init_request(struct client *client,
			struct fw_cdev_send_request *request,
			int destination_id, int speed)
{
	struct outbound_transaction_event *e;
	int ret;

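	/* 512 bytes of payload at S100, doubled per speed step, 4096 max */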
	if (request->tcode != TCODE_STREAM_DATA &&
	    (request->length > 4096 || request->length > 512 << speed))
		return -EIO;

	if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
	    request->length < 4)
		return -EINVAL;

	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	e->client = client;
	e->response.length = request->length;
	e->response.closure = request->closure;

	if (request->data &&
	    copy_from_user(e->response.data,
			   u64_to_uptr(request->data), request->length)) {
		ret = -EFAULT;
		goto failed;
	}

	e->r.resource.release = release_transaction;
	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
	if (ret < 0)
		goto failed;

	fw_send_request(client->device->card, &e->r.transaction,
			request->tcode, destination_id, request->generation,
			speed, request->offset, e->response.data,
			request->length, complete_transaction, e);
	return 0;

 failed:
	kfree(e);

	return ret;
}

static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
	switch (arg->send_request.tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_MASK_SWAP:
	case TCODE_LOCK_COMPARE_SWAP:
	case TCODE_LOCK_FETCH_ADD:
	case TCODE_LOCK_LITTLE_ADD:
	case TCODE_LOCK_BOUNDED_ADD:
	case TCODE_LOCK_WRAP_ADD:
	case TCODE_LOCK_VENDOR_DEPENDENT:
		break;
	default:
		return -EINVAL;
	}

	return init_request(client, &arg->send_request, client->device->node_id,
			    client->device->max_speed);
}

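/*
 * The core passes a NULL fw_request for writes to the FCP command and
 * response registers:  it has already sent the response itself, and the
 * payload is only valid during the callback, which is why
 * handle_request() takes a copy.
 */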
static inline bool is_fcp_request(struct fw_request *request)
{
	return request == NULL;
}

static void release_request(struct client *client,
			    struct client_resource *resource)
{
	struct inbound_transaction_resource *r = container_of(resource,
			struct inbound_transaction_resource, resource);

	if (is_fcp_request(r->request))
		kfree(r->data);
	else
		fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);

	fw_card_put(r->card);
	kfree(r);
}

static void handle_request(struct fw_card *card, struct fw_request *request,
			   int tcode, int destination, int source,
			   int generation, unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
{
	struct address_handler_resource *handler = callback_data;
	struct inbound_transaction_resource *r;
	struct inbound_transaction_event *e;
	size_t event_size0;
	void *fcp_frame = NULL;
	int ret;

	/* card may be different from handler->client->device->card */
	fw_card_get(card);

	r = kmalloc(sizeof(*r), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (r == NULL || e == NULL) {
		fw_notice(card, "out of memory when allocating event\n");
		goto failed;
	}
	r->card    = card;
	r->request = request;
	r->data    = payload;
	r->length  = length;

	if (is_fcp_request(request)) {
		/*
		 * FIXME: Let core-transaction.c manage a
		 * single reference-counted copy?
		 */
		fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
		if (fcp_frame == NULL)
			goto failed;

		r->data = fcp_frame;
	}

	r->resource.release = release_request;
	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
	if (ret < 0)
		goto failed;

	if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
		struct fw_cdev_event_request *req = &e->req.request;

		if (tcode & 0x10)
			tcode = TCODE_LOCK_REQUEST;

		req->type	= FW_CDEV_EVENT_REQUEST;
		req->tcode	= tcode;
		req->offset	= offset;
		req->length	= length;
		req->handle	= r->resource.handle;
		req->closure	= handler->closure;
		event_size0	= sizeof(*req);
	} else {
		struct fw_cdev_event_request2 *req = &e->req.request2;

		req->type	= FW_CDEV_EVENT_REQUEST2;
		req->tcode	= tcode;
		req->offset	= offset;
		req->source_node_id = source;
		req->destination_node_id = destination;
		req->card	= card->index;
		req->generation	= generation;
		req->length	= length;
		req->handle	= r->resource.handle;
		req->closure	= handler->closure;
		event_size0	= sizeof(*req);
	}

	queue_event(handler->client, &e->event,
		    &e->req, event_size0, r->data, length);
	return;

 failed:
	kfree(r);
	kfree(e);
	kfree(fcp_frame);

	if (!is_fcp_request(request))
		fw_send_response(card, request, RCODE_CONFLICT_ERROR);

	fw_card_put(card);
}

static void release_address_handler(struct client *client,
				    struct client_resource *resource)
{
	struct address_handler_resource *r =
	    container_of(resource, struct address_handler_resource, resource);

	fw_core_remove_address_handler(&r->handler);
	kfree(r);
}

static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_allocate *a = &arg->allocate;
	struct address_handler_resource *r;
	struct fw_address_region region;
	int ret;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	region.start = a->offset;
	if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
		region.end = a->offset + a->length;
	else
		region.end = a->region_end;

	r->handler.length           = a->length;
	r->handler.address_callback = handle_request;
	r->handler.callback_data    = r;
	r->closure   = a->closure;
	r->client    = client;

	ret = fw_core_add_address_handler(&r->handler, &region);
	if (ret < 0) {
		kfree(r);
		return ret;
	}
	a->offset = r->handler.offset;

	r->resource.release = release_address_handler;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &r->resource);
		return ret;
	}
	a->handle = r->resource.handle;

	return 0;
}

static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->deallocate.handle,
				       release_address_handler, NULL);
}

static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_response *a = &arg->send_response;
	struct client_resource *resource;
	struct inbound_transaction_resource *r;
	int ret = 0;

	if (release_client_resource(client, a->handle,
				    release_request, &resource) < 0)
		return -EINVAL;

	r = container_of(resource, struct inbound_transaction_resource,
			 resource);
	if (is_fcp_request(r->request))
		goto out;

	if (a->length != fw_get_response_length(r->request)) {
		ret = -EINVAL;
		kfree(r->request);
		goto out;
	}
	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
		ret = -EFAULT;
		kfree(r->request);
		goto out;
	}
	fw_send_response(r->card, r->request, a->rcode);
 out:
	fw_card_put(r->card);
	kfree(r);

	return ret;
}

static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
	fw_schedule_bus_reset(client->device->card, true,
			arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
	return 0;
}

static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor_resource *r =
		container_of(resource, struct descriptor_resource, resource);

	fw_core_remove_descriptor(&r->descriptor);
	kfree(r);
}

static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
	struct descriptor_resource *r;
	int ret;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	if (a->length > 256)
		return -EINVAL;

	r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
		ret = -EFAULT;
		goto failed;
	}

	r->descriptor.length    = a->length;
	r->descriptor.immediate = a->immediate;
	r->descriptor.key       = a->key;
	r->descriptor.data      = r->data;

	ret = fw_core_add_descriptor(&r->descriptor);
	if (ret < 0)
		goto failed;

	r->resource.release = release_descriptor;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&r->descriptor);
		goto failed;
	}
	a->handle = r->resource.handle;

	return 0;
 failed:
	kfree(r);

	return ret;
}

static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->remove_descriptor.handle,
				       release_descriptor, NULL);
}

static void iso_callback(struct fw_iso_context *context, u32 cycle,
			 size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt_event *e;

	e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
	if (e == NULL) {
		fw_notice(context->card, "out of memory when allocating event\n");
		return;
	}
	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
	e->interrupt.closure   = client->iso_closure;
	e->interrupt.cycle     = cycle;
	e->interrupt.header_length = header_length;
	memcpy(e->interrupt.header, header, header_length);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt) + header_length, NULL, 0);
}

static void iso_mc_callback(struct fw_iso_context *context,
			    dma_addr_t completed, void *data)
{
	struct client *client = data;
	struct iso_interrupt_mc_event *e;

	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (e == NULL) {
		fw_notice(context->card, "out of memory when allocating event\n");
		return;
	}
	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
	e->interrupt.closure   = client->iso_closure;
	e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
						      completed);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt), NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
	struct fw_iso_context *context;
	fw_iso_callback_t cb;

	BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE  != FW_ISO_CONTEXT_RECEIVE  ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
					FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);

	switch (a->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		if (a->speed > SCODE_3200 || a->channel > 63)
			return -EINVAL;

		cb = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		if (a->header_size < 4 || (a->header_size & 3) ||
		    a->channel > 63)
			return -EINVAL;

		cb = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		cb = (fw_iso_callback_t)iso_mc_callback;
		break;

	default:
		return -EINVAL;
	}

	context = fw_iso_context_create(client->device->card, a->type,
			a->channel, a->speed, a->header_size, cb, client);
	if (IS_ERR(context))
		return PTR_ERR(context);

	/* We only support one context at this time. */
	spin_lock_irq(&client->lock);
	if (client->iso_context != NULL) {
		spin_unlock_irq(&client->lock);
		fw_iso_context_destroy(context);
		return -EBUSY;
	}
	client->iso_closure = a->closure;
	client->iso_context = context;
	spin_unlock_irq(&client->lock);

	a->handle = 0;

	return 0;
}

static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
	struct fw_iso_context *ctx = client->iso_context;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_set_channels(ctx, &a->channels);
}

/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
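/*
 * Resulting layout of fw_cdev_iso_packet.control:
 *	bits  0..15:	payload_length
 *	bit  16:	interrupt
 *	bit  17:	skip
 *	bits 18..19:	tag
 *	bits 20..23:	sy
 *	bits 24..31:	header_length
 */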

static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_queue_iso *a = &arg->queue_iso;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, transmit_header_bytes = 0;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we setup the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the a->data pointer is ignored.
	 */
	payload = (unsigned long)a->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (a->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
		return -EINVAL;

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
	if (!access_ok(VERIFY_READ, p, a->size))
		return -EFAULT;

	end = (void __user *)p + a->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		switch (ctx->type) {
		case FW_ISO_CONTEXT_TRANSMIT:
			if (u.packet.header_length & 3)
				return -EINVAL;
			transmit_header_bytes = u.packet.header_length;
			break;

		case FW_ISO_CONTEXT_RECEIVE:
			if (u.packet.header_length == 0 ||
			    u.packet.header_length % ctx->header_size != 0)
				return -EINVAL;
			break;

		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
			if (u.packet.payload_length == 0 ||
			    u.packet.payload_length & 3)
				return -EINVAL;
			break;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[transmit_header_bytes / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
		    (u.packet.header, p->header, transmit_header_bytes))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}
	fw_iso_context_queue_flush(ctx);

	a->size    -= uptr_to_u64(p) - a->packets;
	a->packets  = uptr_to_u64(p);
	a->data     = client->vm_start + payload;

	return count;
}

static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_start_iso *a = &arg->start_iso;

	BUILD_BUG_ON(
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
	    (a->tags == 0 || a->tags > 15 || a->sync > 15))
		return -EINVAL;

	return fw_iso_context_start(client->iso_context,
				    a->cycle, a->sync, a->tags);
}

static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_stop_iso *a = &arg->stop_iso;

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}

static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
	struct fw_card *card = client->device->card;
	struct timespec ts = {0, 0};
	u32 cycle_time;
	int ret = 0;

	local_irq_disable();

	cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);

	switch (a->clk_id) {
	case CLOCK_REALTIME:      getnstimeofday(&ts);                   break;
	case CLOCK_MONOTONIC:     do_posix_clock_monotonic_gettime(&ts); break;
	case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts);                  break;
	default:
		ret = -EINVAL;
	}

	local_irq_enable();

	a->tv_sec      = ts.tv_sec;
	a->tv_nsec     = ts.tv_nsec;
	a->cycle_timer = cycle_time;

	return ret;
}

static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
	struct fw_cdev_get_cycle_timer2 ct2;

	ct2.clk_id = CLOCK_REALTIME;
	ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);

	a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
	a->cycle_timer = ct2.cycle_timer;

	return 0;
}

static void iso_resource_work(struct work_struct *work)
{
	struct iso_resource_event *e;
	struct iso_resource *r =
			container_of(work, struct iso_resource, work.work);
	struct client *client = r->client;
	int generation, channel, bandwidth, todo;
	bool skip, free, success;

	spin_lock_irq(&client->lock);
	generation = client->device->generation;
	todo = r->todo;
	/* Allow 1000ms grace period for other reallocations. */
	if (todo == ISO_RES_ALLOC &&
	    time_before64(get_jiffies_64(),
			  client->device->card->reset_jiffies + HZ)) {
		schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
		skip = true;
	} else {
		/* We could be called twice within the same generation. */
		skip = todo == ISO_RES_REALLOC &&
		       r->generation == generation;
	}
	free = todo == ISO_RES_DEALLOC ||
	       todo == ISO_RES_ALLOC_ONCE ||
	       todo == ISO_RES_DEALLOC_ONCE;
	r->generation = generation;
	spin_unlock_irq(&client->lock);

	if (skip)
		goto out;

	bandwidth = r->bandwidth;

	fw_iso_resource_manage(client->device->card, generation,
			r->channels, &channel, &bandwidth,
			todo == ISO_RES_ALLOC ||
			todo == ISO_RES_REALLOC ||
			todo == ISO_RES_ALLOC_ONCE);
	/*
	 * Is this generation outdated already?  As long as this resource sticks
	 * in the idr, it will be scheduled again for a newer generation or at
	 * shutdown.
	 */
	if (channel == -EAGAIN &&
	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
		goto out;

	success = channel >= 0 || bandwidth > 0;

	spin_lock_irq(&client->lock);
	/*
	 * Transit from allocation to reallocation, except if the client
	 * requested deallocation in the meantime.
	 */
	if (r->todo == ISO_RES_ALLOC)
		r->todo = ISO_RES_REALLOC;
	/*
	 * Allocation or reallocation failure?  Pull this resource out of the
	 * idr and prepare for deletion, unless the client is shutting down.
	 */
	if (r->todo == ISO_RES_REALLOC && !success &&
	    !client->in_shutdown &&
	    idr_find(&client->resource_idr, r->resource.handle)) {
		idr_remove(&client->resource_idr, r->resource.handle);
		client_put(client);
		free = true;
	}
	spin_unlock_irq(&client->lock);

	if (todo == ISO_RES_ALLOC && channel >= 0)
		r->channels = 1ULL << channel;

	if (todo == ISO_RES_REALLOC && success)
		goto out;

	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
		e = r->e_alloc;
		r->e_alloc = NULL;
	} else {
		e = r->e_dealloc;
		r->e_dealloc = NULL;
	}
	e->iso_resource.handle    = r->resource.handle;
	e->iso_resource.channel   = channel;
	e->iso_resource.bandwidth = bandwidth;

	queue_event(client, &e->event,
		    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);

	if (free) {
		cancel_delayed_work(&r->work);
		kfree(r->e_alloc);
		kfree(r->e_dealloc);
		kfree(r);
	}
 out:
	client_put(client);
}

static void release_iso_resource(struct client *client,
				 struct client_resource *resource)
{
	struct iso_resource *r =
		container_of(resource, struct iso_resource, resource);

	spin_lock_irq(&client->lock);
	r->todo = ISO_RES_DEALLOC;
	schedule_iso_resource(r, 0);
	spin_unlock_irq(&client->lock);
}

static int init_iso_resource(struct client *client,
		struct fw_cdev_allocate_iso_resource *request, int todo)
{
	struct iso_resource_event *e1, *e2;
	struct iso_resource *r;
	int ret;

	if ((request->channels == 0 && request->bandwidth == 0) ||
	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
	    request->bandwidth < 0)
		return -EINVAL;

	r  = kmalloc(sizeof(*r), GFP_KERNEL);
	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
	if (r == NULL || e1 == NULL || e2 == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_DELAYED_WORK(&r->work, iso_resource_work);
	r->client	= client;
	r->todo		= todo;
	r->generation	= -1;
	r->channels	= request->channels;
	r->bandwidth	= request->bandwidth;
	r->e_alloc	= e1;
	r->e_dealloc	= e2;

	e1->iso_resource.closure = request->closure;
	e1->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
	e2->iso_resource.closure = request->closure;
	e2->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

	if (todo == ISO_RES_ALLOC) {
		r->resource.release = release_iso_resource;
		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
		if (ret < 0)
			goto fail;
	} else {
		r->resource.release = NULL;
		r->resource.handle = -1;
		schedule_iso_resource(r, 0);
	}
	request->handle = r->resource.handle;

	return 0;
 fail:
	kfree(r);
	kfree(e1);
	kfree(e2);

	return ret;
}

static int ioctl_allocate_iso_resource(struct client *client,
				       union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC);
}

static int ioctl_deallocate_iso_resource(struct client *client,
					 union ioctl_arg *arg)
{
	return release_client_resource(client,
			arg->deallocate.handle, release_iso_resource, NULL);
}

static int ioctl_allocate_iso_resource_once(struct client *client,
					    union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}

static int ioctl_deallocate_iso_resource_once(struct client *client,
					      union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}

/*
 * Returns a speed code:  Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
	return client->device->max_speed;
}

static int ioctl_send_broadcast_request(struct client *client,
					union ioctl_arg *arg)
{
	struct fw_cdev_send_request *a = &arg->send_request;

	switch (a->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		break;
	default:
		return -EINVAL;
	}

	/* Security policy: Only allow accesses to Units Space. */
	if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
		return -EACCES;

	return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}

static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
	struct fw_cdev_send_request request;
	int dest;

	if (a->speed > client->device->card->link_speed ||
	    a->length > 1024 << a->speed)
		return -EIO;

	if (a->tag > 3 || a->channel > 63 || a->sy > 15)
		return -EINVAL;

	dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
	request.tcode		= TCODE_STREAM_DATA;
	request.length		= a->length;
	request.closure		= a->closure;
	request.data		= a->data;
	request.generation	= a->generation;

	return init_request(client, &request, dest, a->speed);
}

static void outbound_phy_packet_callback(struct fw_packet *packet,
					 struct fw_card *card, int status)
{
	struct outbound_phy_packet_event *e =
		container_of(packet, struct outbound_phy_packet_event, p);

	switch (status) {
	/* expected: */
	case ACK_COMPLETE:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
	/* should never happen with PHY packets: */
	case ACK_PENDING:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:	e->phy_packet.rcode = RCODE_BUSY;	break;
	case ACK_DATA_ERROR:	e->phy_packet.rcode = RCODE_DATA_ERROR;	break;
	case ACK_TYPE_ERROR:	e->phy_packet.rcode = RCODE_TYPE_ERROR;	break;
	/* stale generation; cancelled; on certain controllers: no ack */
	default:		e->phy_packet.rcode = status;		break;
	}
	e->phy_packet.data[0] = packet->timestamp;

	queue_event(e->client, &e->event, &e->phy_packet,
		    sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
	client_put(e->client);
}

static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
	struct fw_card *card = client->device->card;
	struct outbound_phy_packet_event *e;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	client_get(client);
	e->client		= client;
	e->p.speed		= SCODE_100;
	e->p.generation		= a->generation;
	e->p.header[0]		= TCODE_LINK_INTERNAL << 4;
	e->p.header[1]		= a->data[0];
	e->p.header[2]		= a->data[1];
	e->p.header_length	= 12;
	e->p.callback		= outbound_phy_packet_callback;
	e->phy_packet.closure	= a->closure;
	e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_SENT;
	if (is_ping_packet(a->data))
		e->phy_packet.length = 4;

	card->driver->send_request(card, &e->p);

	return 0;
}

static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
	struct fw_card *card = client->device->card;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	spin_lock_irq(&card->lock);

	list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
	client->phy_receiver_closure = a->closure;

	spin_unlock_irq(&card->lock);

	return 0;
}

void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
	struct client *client;
	struct inbound_phy_packet_event *e;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
		e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
		if (e == NULL) {
			fw_notice(card, "out of memory when allocating event\n");
			break;
		}
		e->phy_packet.closure	= client->phy_receiver_closure;
		e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
		e->phy_packet.rcode	= RCODE_COMPLETE;
		e->phy_packet.length	= 8;
		e->phy_packet.data[0]	= p->header[1];
		e->phy_packet.data[1]	= p->header[2];
		queue_event(client, &e->event,
			    &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
	}

	spin_unlock_irqrestore(&card->lock, flags);
}

static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
	[0x00] = ioctl_get_info,
	[0x01] = ioctl_send_request,
	[0x02] = ioctl_allocate,
	[0x03] = ioctl_deallocate,
	[0x04] = ioctl_send_response,
	[0x05] = ioctl_initiate_bus_reset,
	[0x06] = ioctl_add_descriptor,
	[0x07] = ioctl_remove_descriptor,
	[0x08] = ioctl_create_iso_context,
	[0x09] = ioctl_queue_iso,
	[0x0a] = ioctl_start_iso,
	[0x0b] = ioctl_stop_iso,
	[0x0c] = ioctl_get_cycle_timer,
	[0x0d] = ioctl_allocate_iso_resource,
	[0x0e] = ioctl_deallocate_iso_resource,
	[0x0f] = ioctl_allocate_iso_resource_once,
	[0x10] = ioctl_deallocate_iso_resource_once,
	[0x11] = ioctl_get_speed,
	[0x12] = ioctl_send_broadcast_request,
	[0x13] = ioctl_send_stream_packet,
	[0x14] = ioctl_get_cycle_timer2,
	[0x15] = ioctl_send_phy_packet,
	[0x16] = ioctl_receive_phy_packets,
	[0x17] = ioctl_set_iso_channels,
};

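/*
 * A command's _IOC_NR() doubles as its index into ioctl_handlers[] above.
 * dispatch_ioctl() copies the argument struct into a kernel buffer and/or
 * back out according to the _IOC_DIR() and _IOC_SIZE() encoded in the
 * command number.
 */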
static int dispatch_ioctl(struct client *client,
			  unsigned int cmd, void __user *arg)
{
	union ioctl_arg buffer;
	int ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
	    _IOC_SIZE(cmd) > sizeof(buffer))
		return -ENOTTY;

	if (_IOC_DIR(cmd) == _IOC_READ)
		memset(&buffer, 0, _IOC_SIZE(cmd));

	if (_IOC_DIR(cmd) & _IOC_WRITE)
		if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ)
		if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
			return -EFAULT;

	return ret;
}

static long fw_device_op_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
				      unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
				 page_count, direction);
	if (ret < 0)
		return ret;

	ret = fw_iso_buffer_map(&client->buffer, vma);
	if (ret < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return ret;
}

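/*
 * fw_device_op_release() must not tear down client resources while
 * outbound transactions are still in flight:  complete_transaction()
 * removes each finished transaction from the idr and wakes tx_flush_wait
 * once in_shutdown is set.
 */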
static int is_outbound_transaction_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;

	return resource->release == release_transaction;
}

static int has_outbound_transactions(struct client *client)
{
	int ret;

	spin_lock_irq(&client->lock);
	ret = idr_for_each(&client->resource_idr,
			   is_outbound_transaction_resource, NULL);
	spin_unlock_irq(&client->lock);

	return ret;
}

static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;
	struct client *client = data;

	resource->release(client, resource);
	client_put(client);

	return 0;
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *event, *next_event;

	spin_lock_irq(&client->device->card->lock);
	list_del(&client->phy_receiver_link);
	spin_unlock_irq(&client->device->card->lock);

	mutex_lock(&client->device->client_list_mutex);
	list_del(&client->link);
	mutex_unlock(&client->device->client_list_mutex);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irq(&client->lock);
	client->in_shutdown = true;
	spin_unlock_irq(&client->lock);

	wait_event(client->tx_flush_wait, !has_outbound_transactions(client));

	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_remove_all(&client->resource_idr);
	idr_destroy(&client->resource_idr);

	list_for_each_entry_safe(event, next_event, &client->event_list, link)
		kfree(event);

	client_put(client);

	return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
{
	struct client *client = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= POLLHUP | POLLERR;
	if (!list_empty(&client->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.mmap		= fw_device_op_mmap,
	.release	= fw_device_op_release,
	.poll		= fw_device_op_poll,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};