/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include <asm/system.h>

#include "core.h"

/*
 * ABI version history is documented in linux/firewire-cdev.h.
 */
#define FW_CDEV_KERNEL_VERSION			4
#define FW_CDEV_VERSION_EVENT_REQUEST2		4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END	4

struct client {
	u32 version;
	struct fw_device *device;

	spinlock_t lock;
	bool in_shutdown;
	struct idr resource_idr;
	struct list_head event_list;
	wait_queue_head_t wait;
	wait_queue_head_t tx_flush_wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;

	struct list_head phy_receiver_link;
	u64 phy_receiver_closure;

	struct list_head link;
	struct kref kref;
};

static inline void client_get(struct client *client)
{
	kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
	struct client *client = container_of(kref, struct client, kref);

	fw_device_put(client->device);
	kfree(client);
}

static void client_put(struct client *client)
{
	kref_put(&client->kref, client_release);
}

struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);
struct client_resource {
	client_resource_release_fn_t release;
	int handle;
};

struct address_handler_resource {
	struct client_resource resource;
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
};

struct outbound_transaction_resource {
	struct client_resource resource;
	struct fw_transaction transaction;
};

struct inbound_transaction_resource {
	struct client_resource resource;
	struct fw_card *card;
	struct fw_request *request;
	void *data;
	size_t length;
};

struct descriptor_resource {
	struct client_resource resource;
	struct fw_descriptor descriptor;
	u32 data[0];
};

struct iso_resource {
	struct client_resource resource;
	struct client *client;
	/* Schedule work and access todo only with client->lock held. */
	struct delayed_work work;
	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
	int generation;
	u64 channels;
	s32 bandwidth;
	__be32 transaction_data[2];
	struct iso_resource_event *e_alloc, *e_dealloc;
};

static void release_iso_resource(struct client *, struct client_resource *);

static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
	client_get(r->client);
	if (!schedule_delayed_work(&r->work, delay))
		client_put(r->client);
}

static void schedule_if_iso_resource(struct client_resource *resource)
{
	if (resource->release == release_iso_resource)
		schedule_iso_resource(container_of(resource,
					struct iso_resource, resource), 0);
}
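/*
 * Reference-count sketch for the helpers above: the client is pinned before
 * the work is scheduled and unpinned again if the work was already pending,
 * so exactly one reference is held per outstanding iso_resource_work() run,
 * which drops it in its "out:" path.
 */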

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct bus_reset_event {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
	struct event event;
	struct client *client;
	struct outbound_transaction_resource r;
	struct fw_cdev_event_response response;
};

struct inbound_transaction_event {
	struct event event;
	union {
		struct fw_cdev_event_request request;
		struct fw_cdev_event_request2 request2;
	} req;
};

struct iso_interrupt_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_interrupt_mc_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt_mc interrupt;
};

struct iso_resource_event {
	struct event event;
	struct fw_cdev_event_iso_resource iso_resource;
};

struct outbound_phy_packet_event {
	struct event event;
	struct client *client;
	struct fw_packet p;
	struct fw_cdev_event_phy_packet phy_packet;
};

struct inbound_phy_packet_event {
	struct event event;
	struct fw_cdev_event_phy_packet phy_packet;
};

static inline void __user *u64_to_uptr(__u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline __u64 uptr_to_u64(void __user *ptr)
{
	return (__u64)(unsigned long)ptr;
}
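/*
 * The character-device ABI carries user pointers as __u64 so that 32-bit
 * userland works unchanged on a 64-bit kernel; the round trip through
 * unsigned long above truncates or zero-extends them safely on either ABI.
 */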

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = fw_device_get_by_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);
		return -ENOMEM;
	}

	client->device = device;
	spin_lock_init(&client->lock);
	idr_init(&client->resource_idr);
	INIT_LIST_HEAD(&client->event_list);
	init_waitqueue_head(&client->wait);
	init_waitqueue_head(&client->tx_flush_wait);
	INIT_LIST_HEAD(&client->phy_receiver_link);
	kref_init(&client->kref);

	file->private_data = client;

	mutex_lock(&device->client_list_mutex);
	list_add_tail(&client->link, &device->client_list);
	mutex_unlock(&device->client_list_mutex);

	return nonseekable_open(inode, file);
}

static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		kfree(event);
	else
		list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}

static int dequeue_event(struct client *client,
			 char __user *buffer, size_t count)
{
	struct event *event;
	size_t size, total;
	int i, ret;

	ret = wait_event_interruptible(client->wait,
			!list_empty(&client->event_list) ||
			fw_device_is_shutdown(client->device));
	if (ret < 0)
		return ret;

	if (list_empty(&client->event_list) &&
		       fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irq(&client->lock);
	event = list_first_entry(&client->event_list, struct event, link);
	list_del(&event->link);
	spin_unlock_irq(&client->lock);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			ret = -EFAULT;
			goto out;
		}
		total += size;
	}
	ret = total;

 out:
	kfree(event);

	return ret;
}

static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
				 struct client *client)
{
	struct fw_card *card = client->device->card;

	spin_lock_irq(&card->lock);

	event->closure	     = client->bus_reset_closure;
	event->type          = FW_CDEV_EVENT_BUS_RESET;
	event->generation    = client->device->generation;
	event->node_id       = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id    = card->bm_node_id;
	event->irm_node_id   = card->irm_node->node_id;
	event->root_node_id  = card->root_node->node_id;

	spin_unlock_irq(&card->lock);
}

static void for_each_client(struct fw_device *device,
			    void (*callback)(struct client *client))
{
	struct client *c;

	mutex_lock(&device->client_list_mutex);
	list_for_each_entry(c, &device->client_list, link)
		callback(c);
	mutex_unlock(&device->client_list_mutex);
}

static int schedule_reallocations(int id, void *p, void *data)
{
	schedule_if_iso_resource(p);

	return 0;
}

static void queue_bus_reset_event(struct client *client)
{
	struct bus_reset_event *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL) {
		fw_notify("Out of memory when allocating event\n");
		return;
	}

	fill_bus_reset_event(&e->reset, client);

	queue_event(client, &e->event,
		    &e->reset, sizeof(e->reset), NULL, 0);

	spin_lock_irq(&client->lock);
	idr_for_each(&client->resource_idr, schedule_reallocations, client);
	spin_unlock_irq(&client->lock);
}

void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}

union ioctl_arg {
	struct fw_cdev_get_info			get_info;
	struct fw_cdev_send_request		send_request;
	struct fw_cdev_allocate			allocate;
	struct fw_cdev_deallocate		deallocate;
	struct fw_cdev_send_response		send_response;
	struct fw_cdev_initiate_bus_reset	initiate_bus_reset;
	struct fw_cdev_add_descriptor		add_descriptor;
	struct fw_cdev_remove_descriptor	remove_descriptor;
	struct fw_cdev_create_iso_context	create_iso_context;
	struct fw_cdev_queue_iso		queue_iso;
	struct fw_cdev_start_iso		start_iso;
	struct fw_cdev_stop_iso			stop_iso;
	struct fw_cdev_get_cycle_timer		get_cycle_timer;
	struct fw_cdev_allocate_iso_resource	allocate_iso_resource;
	struct fw_cdev_send_stream_packet	send_stream_packet;
	struct fw_cdev_get_cycle_timer2		get_cycle_timer2;
	struct fw_cdev_send_phy_packet		send_phy_packet;
	struct fw_cdev_receive_phy_packets	receive_phy_packets;
	struct fw_cdev_set_iso_channels		set_iso_channels;
};
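/*
 * dispatch_ioctl() below copies each request into a single stack buffer of
 * this union type; _IOC_SIZE(cmd) is checked against sizeof(union ioctl_arg)
 * so no handler can read or write past the largest member.
 */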

static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_info *a = &arg->get_info;
	struct fw_cdev_event_bus_reset bus_reset;
	unsigned long ret = 0;

	client->version = a->version;
	a->version = FW_CDEV_KERNEL_VERSION;
	a->card = client->device->card->index;

	down_read(&fw_device_rwsem);

	if (a->rom != 0) {
		size_t want = a->rom_length;
		size_t have = client->device->config_rom_length * 4;

		ret = copy_to_user(u64_to_uptr(a->rom),
				   client->device->config_rom, min(want, have));
	}
	a->rom_length = client->device->config_rom_length * 4;

	up_read(&fw_device_rwsem);

	if (ret != 0)
		return -EFAULT;

	client->bus_reset_closure = a->bus_reset_closure;
	if (a->bus_reset != 0) {
		fill_bus_reset_event(&bus_reset, client);
		if (copy_to_user(u64_to_uptr(a->bus_reset),
				 &bus_reset, sizeof(bus_reset)))
			return -EFAULT;
	}

	return 0;
}

static int add_client_resource(struct client *client,
			       struct client_resource *resource, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

 retry:
	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
		return -ENOMEM;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		ret = -ECANCELED;
	else
		ret = idr_get_new(&client->resource_idr, resource,
				  &resource->handle);
	if (ret >= 0) {
		client_get(client);
		schedule_if_iso_resource(resource);
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (ret == -EAGAIN)
		goto retry;

	return ret < 0 ? ret : 0;
}

static int release_client_resource(struct client *client, u32 handle,
				   client_resource_release_fn_t release,
				   struct client_resource **return_resource)
{
	struct client_resource *resource;

	spin_lock_irq(&client->lock);
	if (client->in_shutdown)
		resource = NULL;
	else
		resource = idr_find(&client->resource_idr, handle);
	if (resource && resource->release == release)
		idr_remove(&client->resource_idr, handle);
	spin_unlock_irq(&client->lock);

	if (!(resource && resource->release == release))
		return -EINVAL;

	if (return_resource)
		*return_resource = resource;
	else
		resource->release(client, resource);

	client_put(client);

	return 0;
}

static void release_transaction(struct client *client,
				struct client_resource *resource)
{
}

static void complete_transaction(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct outbound_transaction_event *e = data;
	struct fw_cdev_event_response *rsp = &e->response;
	struct client *client = e->client;
	unsigned long flags;

	if (length < rsp->length)
		rsp->length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(rsp->data, payload, rsp->length);

	spin_lock_irqsave(&client->lock, flags);
	idr_remove(&client->resource_idr, e->r.resource.handle);
	if (client->in_shutdown)
		wake_up(&client->tx_flush_wait);
	spin_unlock_irqrestore(&client->lock, flags);

	rsp->type = FW_CDEV_EVENT_RESPONSE;
	rsp->rcode = rcode;

	/*
	 * In the case that sizeof(*rsp) doesn't align with the position of the
	 * data, and the read is short, preserve an extra copy of the data
	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
	 * for short reads and some apps depended on it, this is both safe
	 * and prudent for compatibility.
	 */
	if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
		queue_event(client, &e->event, rsp, sizeof(*rsp),
			    rsp->data, rsp->length);
	else
		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
			    NULL, 0);

	/* Drop the idr's reference */
	client_put(client);
}
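/*
 * Sketch of the compatibility case above: on ABIs where sizeof(*rsp) exceeds
 * offsetof(typeof(*rsp), data) due to tail padding, a payload that fits in
 * that padding is queued twice (once inside *rsp, once as the second vector),
 * which reproduces the read layout that pre-2.6.27 clients saw.
 */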

static int init_request(struct client *client,
			struct fw_cdev_send_request *request,
			int destination_id, int speed)
{
	struct outbound_transaction_event *e;
	int ret;

	if (request->tcode != TCODE_STREAM_DATA &&
	    (request->length > 4096 || request->length > 512 << speed))
		return -EIO;

	if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
	    request->length < 4)
		return -EINVAL;

	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	e->client = client;
	e->response.length = request->length;
	e->response.closure = request->closure;

	if (request->data &&
	    copy_from_user(e->response.data,
			   u64_to_uptr(request->data), request->length)) {
		ret = -EFAULT;
		goto failed;
	}

	e->r.resource.release = release_transaction;
	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
	if (ret < 0)
		goto failed;

	fw_send_request(client->device->card, &e->r.transaction,
			request->tcode, destination_id, request->generation,
			speed, request->offset, e->response.data,
			request->length, complete_transaction, e);
	return 0;

 failed:
	kfree(e);

	return ret;
}
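/*
 * The -EIO check in init_request() encodes IEEE 1394's maximum asynchronous
 * payload: 512 bytes at S100, doubling with each speed step and capped at
 * 4096 bytes from S800 upward.
 */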

static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
	switch (arg->send_request.tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_MASK_SWAP:
	case TCODE_LOCK_COMPARE_SWAP:
	case TCODE_LOCK_FETCH_ADD:
	case TCODE_LOCK_LITTLE_ADD:
	case TCODE_LOCK_BOUNDED_ADD:
	case TCODE_LOCK_WRAP_ADD:
	case TCODE_LOCK_VENDOR_DEPENDENT:
		break;
	default:
		return -EINVAL;
	}

	return init_request(client, &arg->send_request, client->device->node_id,
			    client->device->max_speed);
}

static inline bool is_fcp_request(struct fw_request *request)
{
	return request == NULL;
}
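/*
 * The core hands FCP writes to address handlers with a NULL fw_request:
 * it has already sent the response itself, so the only per-client state is
 * the private copy of the payload made in handle_request() below.
 */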

static void release_request(struct client *client,
			    struct client_resource *resource)
{
	struct inbound_transaction_resource *r = container_of(resource,
			struct inbound_transaction_resource, resource);

	if (is_fcp_request(r->request))
		kfree(r->data);
	else
		fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);

	fw_card_put(r->card);
	kfree(r);
}

static void handle_request(struct fw_card *card, struct fw_request *request,
			   int tcode, int destination, int source,
			   int generation, unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
{
	struct address_handler_resource *handler = callback_data;
	struct inbound_transaction_resource *r;
	struct inbound_transaction_event *e;
	size_t event_size0;
	void *fcp_frame = NULL;
	int ret;

	/* card may be different from handler->client->device->card */
	fw_card_get(card);

	r = kmalloc(sizeof(*r), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (r == NULL || e == NULL) {
		fw_notify("Out of memory when allocating event\n");
		goto failed;
	}
	r->card    = card;
	r->request = request;
	r->data    = payload;
	r->length  = length;

	if (is_fcp_request(request)) {
		/*
		 * FIXME: Let core-transaction.c manage a
		 * single reference-counted copy?
		 */
		fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
		if (fcp_frame == NULL)
			goto failed;

		r->data = fcp_frame;
	}

	r->resource.release = release_request;
	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
	if (ret < 0)
		goto failed;

	if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
		struct fw_cdev_event_request *req = &e->req.request;

		if (tcode & 0x10)
			tcode = TCODE_LOCK_REQUEST;

		req->type	= FW_CDEV_EVENT_REQUEST;
		req->tcode	= tcode;
		req->offset	= offset;
		req->length	= length;
		req->handle	= r->resource.handle;
		req->closure	= handler->closure;
		event_size0	= sizeof(*req);
	} else {
		struct fw_cdev_event_request2 *req = &e->req.request2;

		req->type	= FW_CDEV_EVENT_REQUEST2;
		req->tcode	= tcode;
		req->offset	= offset;
		req->source_node_id = source;
		req->destination_node_id = destination;
		req->card	= card->index;
		req->generation	= generation;
		req->length	= length;
		req->handle	= r->resource.handle;
		req->closure	= handler->closure;
		event_size0	= sizeof(*req);
	}

	queue_event(handler->client, &e->event,
		    &e->req, event_size0, r->data, length);
	return;

 failed:
	kfree(r);
	kfree(e);
	kfree(fcp_frame);

	if (!is_fcp_request(request))
		fw_send_response(card, request, RCODE_CONFLICT_ERROR);

	fw_card_put(card);
}

static void release_address_handler(struct client *client,
				    struct client_resource *resource)
{
	struct address_handler_resource *r =
	    container_of(resource, struct address_handler_resource, resource);

	fw_core_remove_address_handler(&r->handler);
	kfree(r);
}

static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_allocate *a = &arg->allocate;
	struct address_handler_resource *r;
	struct fw_address_region region;
	int ret;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	region.start = a->offset;
	if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
		region.end = a->offset + a->length;
	else
		region.end = a->region_end;

	r->handler.length           = a->length;
	r->handler.address_callback = handle_request;
	r->handler.callback_data    = r;
	r->closure   = a->closure;
	r->client    = client;

	ret = fw_core_add_address_handler(&r->handler, &region);
	if (ret < 0) {
		kfree(r);
		return ret;
	}
	a->offset = r->handler.offset;

	r->resource.release = release_address_handler;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &r->resource);
		return ret;
	}
	a->handle = r->resource.handle;

	return 0;
}

static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->deallocate.handle,
				       release_address_handler, NULL);
}

static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_response *a = &arg->send_response;
	struct client_resource *resource;
	struct inbound_transaction_resource *r;
	int ret = 0;

	if (release_client_resource(client, a->handle,
				    release_request, &resource) < 0)
		return -EINVAL;

	r = container_of(resource, struct inbound_transaction_resource,
			 resource);
	if (is_fcp_request(r->request))
		goto out;

	if (a->length != fw_get_response_length(r->request)) {
		ret = -EINVAL;
		kfree(r->request);
		goto out;
	}
	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
		ret = -EFAULT;
		kfree(r->request);
		goto out;
	}
	fw_send_response(r->card, r->request, a->rcode);
 out:
	fw_card_put(r->card);
	kfree(r);

	return ret;
}

static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
	fw_schedule_bus_reset(client->device->card, true,
			arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
	return 0;
}

static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor_resource *r =
		container_of(resource, struct descriptor_resource, resource);

	fw_core_remove_descriptor(&r->descriptor);
	kfree(r);
}

static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
	struct descriptor_resource *r;
	int ret;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	if (a->length > 256)
		return -EINVAL;

	r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
		ret = -EFAULT;
		goto failed;
	}

	r->descriptor.length    = a->length;
	r->descriptor.immediate = a->immediate;
	r->descriptor.key       = a->key;
	r->descriptor.data      = r->data;

	ret = fw_core_add_descriptor(&r->descriptor);
	if (ret < 0)
		goto failed;

	r->resource.release = release_descriptor;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&r->descriptor);
		goto failed;
	}
	a->handle = r->resource.handle;

	return 0;
 failed:
	kfree(r);

	return ret;
}

static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->remove_descriptor.handle,
				       release_descriptor, NULL);
}

static void iso_callback(struct fw_iso_context *context, u32 cycle,
			 size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt_event *e;

	e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
	if (e == NULL) {
		fw_notify("Out of memory when allocating event\n");
		return;
	}
	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
	e->interrupt.closure   = client->iso_closure;
	e->interrupt.cycle     = cycle;
	e->interrupt.header_length = header_length;
	memcpy(e->interrupt.header, header, header_length);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt) + header_length, NULL, 0);
}

static void iso_mc_callback(struct fw_iso_context *context,
			    dma_addr_t completed, void *data)
{
	struct client *client = data;
	struct iso_interrupt_mc_event *e;

	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (e == NULL) {
		fw_notify("Out of memory when allocating event\n");
		return;
	}
	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
	e->interrupt.closure   = client->iso_closure;
	e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
						      completed);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt), NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
	struct fw_iso_context *context;
	fw_iso_callback_t cb;

	BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE  != FW_ISO_CONTEXT_RECEIVE  ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
					FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);

	switch (a->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		if (a->speed > SCODE_3200 || a->channel > 63)
			return -EINVAL;

		cb = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		if (a->header_size < 4 || (a->header_size & 3) ||
		    a->channel > 63)
			return -EINVAL;

		cb = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		cb = (fw_iso_callback_t)iso_mc_callback;
		break;

	default:
		return -EINVAL;
	}

	context = fw_iso_context_create(client->device->card, a->type,
			a->channel, a->speed, a->header_size, cb, client);
	if (IS_ERR(context))
		return PTR_ERR(context);

	/* We only support one context at this time. */
	spin_lock_irq(&client->lock);
	if (client->iso_context != NULL) {
		spin_unlock_irq(&client->lock);
		fw_iso_context_destroy(context);
		return -EBUSY;
	}
	client->iso_closure = a->closure;
	client->iso_context = context;
	spin_unlock_irq(&client->lock);

	a->handle = 0;

	return 0;
}

static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
	struct fw_iso_context *ctx = client->iso_context;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_set_channels(ctx, &a->channels);
}

/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
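/*
 * Control word layout implied by the macros above (bit 0 = LSB):
 *
 *	31      24 23   20 19 18 17 16 15              0
 *	+---------+-------+-----+--+--+-----------------+
 *	| hdr_len |  sy   | tag |s |i | payload_length  |
 *	+---------+-------+-----+--+--+-----------------+
 */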

static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_queue_iso *a = &arg->queue_iso;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, transmit_header_bytes = 0;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we set up the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the a->data pointer is ignored.
	 */
	payload = (unsigned long)a->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (a->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
		return -EINVAL;

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
	if (!access_ok(VERIFY_READ, p, a->size))
		return -EFAULT;

	end = (void __user *)p + a->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		switch (ctx->type) {
		case FW_ISO_CONTEXT_TRANSMIT:
			if (u.packet.header_length & 3)
				return -EINVAL;
			transmit_header_bytes = u.packet.header_length;
			break;

		case FW_ISO_CONTEXT_RECEIVE:
			if (u.packet.header_length == 0 ||
			    u.packet.header_length % ctx->header_size != 0)
				return -EINVAL;
			break;

		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
			if (u.packet.payload_length == 0 ||
			    u.packet.payload_length & 3)
				return -EINVAL;
			break;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[transmit_header_bytes / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
		    (u.packet.header, p->header, transmit_header_bytes))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}

	a->size    -= uptr_to_u64(p) - a->packets;
	a->packets  = uptr_to_u64(p);
	a->data     = client->vm_start + payload;

	return count;
}

static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_start_iso *a = &arg->start_iso;

	BUILD_BUG_ON(
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
	    (a->tags == 0 || a->tags > 15 || a->sync > 15))
		return -EINVAL;

	return fw_iso_context_start(client->iso_context,
				    a->cycle, a->sync, a->tags);
}

static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_stop_iso *a = &arg->stop_iso;

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}

static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
	struct fw_card *card = client->device->card;
	struct timespec ts = {0, 0};
	u32 cycle_time;
	int ret = 0;

	local_irq_disable();

	cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);

	switch (a->clk_id) {
	case CLOCK_REALTIME:      getnstimeofday(&ts);                   break;
	case CLOCK_MONOTONIC:     do_posix_clock_monotonic_gettime(&ts); break;
	case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts);                  break;
	default:
		ret = -EINVAL;
	}

	local_irq_enable();

	a->tv_sec      = ts.tv_sec;
	a->tv_nsec     = ts.tv_nsec;
	a->cycle_timer = cycle_time;

	return ret;
}
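/*
 * Interrupts stay off across the two reads above so that the bus cycle
 * timer and the chosen system clock are sampled as close together as
 * possible; userland uses the pair to correlate bus time with
 * CLOCK_REALTIME, CLOCK_MONOTONIC, or CLOCK_MONOTONIC_RAW.
 */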

static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
	struct fw_cdev_get_cycle_timer2 ct2;

	ct2.clk_id = CLOCK_REALTIME;
	ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);

	a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
	a->cycle_timer = ct2.cycle_timer;

	return 0;
}

static void iso_resource_work(struct work_struct *work)
{
	struct iso_resource_event *e;
	struct iso_resource *r =
			container_of(work, struct iso_resource, work.work);
	struct client *client = r->client;
	int generation, channel, bandwidth, todo;
	bool skip, free, success;

	spin_lock_irq(&client->lock);
	generation = client->device->generation;
	todo = r->todo;
	/* Allow 1000ms grace period for other reallocations. */
	if (todo == ISO_RES_ALLOC &&
	    time_before64(get_jiffies_64(),
			  client->device->card->reset_jiffies + HZ)) {
		schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
		skip = true;
	} else {
		/* We could be called twice within the same generation. */
		skip = todo == ISO_RES_REALLOC &&
		       r->generation == generation;
	}
	free = todo == ISO_RES_DEALLOC ||
	       todo == ISO_RES_ALLOC_ONCE ||
	       todo == ISO_RES_DEALLOC_ONCE;
	r->generation = generation;
	spin_unlock_irq(&client->lock);

	if (skip)
		goto out;

	bandwidth = r->bandwidth;

	fw_iso_resource_manage(client->device->card, generation,
			r->channels, &channel, &bandwidth,
			todo == ISO_RES_ALLOC ||
			todo == ISO_RES_REALLOC ||
			todo == ISO_RES_ALLOC_ONCE,
			r->transaction_data);
	/*
	 * Is this generation outdated already?  As long as this resource sticks
	 * in the idr, it will be scheduled again for a newer generation or at
	 * shutdown.
	 */
	if (channel == -EAGAIN &&
	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
		goto out;

	success = channel >= 0 || bandwidth > 0;

	spin_lock_irq(&client->lock);
	/*
	 * Transit from allocation to reallocation, except if the client
	 * requested deallocation in the meantime.
	 */
	if (r->todo == ISO_RES_ALLOC)
		r->todo = ISO_RES_REALLOC;
	/*
	 * Allocation or reallocation failure?  Pull this resource out of the
	 * idr and prepare for deletion, unless the client is shutting down.
	 */
	if (r->todo == ISO_RES_REALLOC && !success &&
	    !client->in_shutdown &&
	    idr_find(&client->resource_idr, r->resource.handle)) {
		idr_remove(&client->resource_idr, r->resource.handle);
		client_put(client);
		free = true;
	}
	spin_unlock_irq(&client->lock);

	if (todo == ISO_RES_ALLOC && channel >= 0)
		r->channels = 1ULL << channel;

	if (todo == ISO_RES_REALLOC && success)
		goto out;

	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
		e = r->e_alloc;
		r->e_alloc = NULL;
	} else {
		e = r->e_dealloc;
		r->e_dealloc = NULL;
	}
	e->iso_resource.handle    = r->resource.handle;
	e->iso_resource.channel   = channel;
	e->iso_resource.bandwidth = bandwidth;

	queue_event(client, &e->event,
		    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);

	if (free) {
		cancel_delayed_work(&r->work);
		kfree(r->e_alloc);
		kfree(r->e_dealloc);
		kfree(r);
	}
 out:
	client_put(client);
}

static void release_iso_resource(struct client *client,
				 struct client_resource *resource)
{
	struct iso_resource *r =
		container_of(resource, struct iso_resource, resource);

	spin_lock_irq(&client->lock);
	r->todo = ISO_RES_DEALLOC;
	schedule_iso_resource(r, 0);
	spin_unlock_irq(&client->lock);
}

static int init_iso_resource(struct client *client,
		struct fw_cdev_allocate_iso_resource *request, int todo)
{
	struct iso_resource_event *e1, *e2;
	struct iso_resource *r;
	int ret;

	if ((request->channels == 0 && request->bandwidth == 0) ||
	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
	    request->bandwidth < 0)
		return -EINVAL;

	r  = kmalloc(sizeof(*r), GFP_KERNEL);
	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
	if (r == NULL || e1 == NULL || e2 == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_DELAYED_WORK(&r->work, iso_resource_work);
	r->client	= client;
	r->todo		= todo;
	r->generation	= -1;
	r->channels	= request->channels;
	r->bandwidth	= request->bandwidth;
	r->e_alloc	= e1;
	r->e_dealloc	= e2;

	e1->iso_resource.closure = request->closure;
	e1->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
	e2->iso_resource.closure = request->closure;
	e2->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

	if (todo == ISO_RES_ALLOC) {
		r->resource.release = release_iso_resource;
		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
		if (ret < 0)
			goto fail;
	} else {
		r->resource.release = NULL;
		r->resource.handle = -1;
		schedule_iso_resource(r, 0);
	}
	request->handle = r->resource.handle;

	return 0;
 fail:
	kfree(r);
	kfree(e1);
	kfree(e2);

	return ret;
}

static int ioctl_allocate_iso_resource(struct client *client,
				       union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC);
}

static int ioctl_deallocate_iso_resource(struct client *client,
					 union ioctl_arg *arg)
{
	return release_client_resource(client,
			arg->deallocate.handle, release_iso_resource, NULL);
}

static int ioctl_allocate_iso_resource_once(struct client *client,
					    union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}

static int ioctl_deallocate_iso_resource_once(struct client *client,
					      union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}

/*
 * Returns a speed code:  Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
	return client->device->max_speed;
}

static int ioctl_send_broadcast_request(struct client *client,
					union ioctl_arg *arg)
{
	struct fw_cdev_send_request *a = &arg->send_request;

	switch (a->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		break;
	default:
		return -EINVAL;
	}

	/* Security policy: Only allow accesses to Units Space. */
	if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
		return -EACCES;

	return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}

static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
	struct fw_cdev_send_request request;
	int dest;

	if (a->speed > client->device->card->link_speed ||
	    a->length > 1024 << a->speed)
		return -EIO;

	if (a->tag > 3 || a->channel > 63 || a->sy > 15)
		return -EINVAL;

	dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
	request.tcode		= TCODE_STREAM_DATA;
	request.length		= a->length;
	request.closure		= a->closure;
	request.data		= a->data;
	request.generation	= a->generation;

	return init_request(client, &request, dest, a->speed);
}

static void outbound_phy_packet_callback(struct fw_packet *packet,
					 struct fw_card *card, int status)
{
	struct outbound_phy_packet_event *e =
		container_of(packet, struct outbound_phy_packet_event, p);

	switch (status) {
	/* expected: */
	case ACK_COMPLETE:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
	/* should never happen with PHY packets: */
	case ACK_PENDING:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:	e->phy_packet.rcode = RCODE_BUSY;	break;
	case ACK_DATA_ERROR:	e->phy_packet.rcode = RCODE_DATA_ERROR;	break;
	case ACK_TYPE_ERROR:	e->phy_packet.rcode = RCODE_TYPE_ERROR;	break;
	/* stale generation; cancelled; on certain controllers: no ack */
	default:		e->phy_packet.rcode = status;		break;
	}
	e->phy_packet.data[0] = packet->timestamp;

	queue_event(e->client, &e->event, &e->phy_packet,
		    sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
	client_put(e->client);
}

static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
	struct fw_card *card = client->device->card;
	struct outbound_phy_packet_event *e;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	client_get(client);
	e->client		= client;
	e->p.speed		= SCODE_100;
	e->p.generation		= a->generation;
	e->p.header[0]		= TCODE_LINK_INTERNAL << 4;
	e->p.header[1]		= a->data[0];
	e->p.header[2]		= a->data[1];
	e->p.header_length	= 12;
	e->p.callback		= outbound_phy_packet_callback;
	e->phy_packet.closure	= a->closure;
	e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_SENT;
	if (is_ping_packet(a->data))
		e->phy_packet.length = 4;

	card->driver->send_request(card, &e->p);

	return 0;
}
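/*
 * On the wire a PHY packet is one quadlet followed by its bitwise inverse;
 * a->data[0] and a->data[1] are sent verbatim as header[1]/header[2] above.
 * For ping packets the event's 4-byte payload carries the timestamp that
 * outbound_phy_packet_callback() stores in data[0].
 */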

static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
	struct fw_card *card = client->device->card;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	spin_lock_irq(&card->lock);

	list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
	client->phy_receiver_closure = a->closure;

	spin_unlock_irq(&card->lock);

	return 0;
}

void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
	struct client *client;
	struct inbound_phy_packet_event *e;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
		e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
		if (e == NULL) {
			fw_notify("Out of memory when allocating event\n");
			break;
		}
		e->phy_packet.closure	= client->phy_receiver_closure;
		e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
		e->phy_packet.rcode	= RCODE_COMPLETE;
		e->phy_packet.length	= 8;
		e->phy_packet.data[0]	= p->header[1];
		e->phy_packet.data[1]	= p->header[2];
		queue_event(client, &e->event,
			    &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
	}

	spin_unlock_irqrestore(&card->lock, flags);
}

static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
	[0x00] = ioctl_get_info,
	[0x01] = ioctl_send_request,
	[0x02] = ioctl_allocate,
	[0x03] = ioctl_deallocate,
	[0x04] = ioctl_send_response,
	[0x05] = ioctl_initiate_bus_reset,
	[0x06] = ioctl_add_descriptor,
	[0x07] = ioctl_remove_descriptor,
	[0x08] = ioctl_create_iso_context,
	[0x09] = ioctl_queue_iso,
	[0x0a] = ioctl_start_iso,
	[0x0b] = ioctl_stop_iso,
	[0x0c] = ioctl_get_cycle_timer,
	[0x0d] = ioctl_allocate_iso_resource,
	[0x0e] = ioctl_deallocate_iso_resource,
	[0x0f] = ioctl_allocate_iso_resource_once,
	[0x10] = ioctl_deallocate_iso_resource_once,
	[0x11] = ioctl_get_speed,
	[0x12] = ioctl_send_broadcast_request,
	[0x13] = ioctl_send_stream_packet,
	[0x14] = ioctl_get_cycle_timer2,
	[0x15] = ioctl_send_phy_packet,
	[0x16] = ioctl_receive_phy_packets,
	[0x17] = ioctl_set_iso_channels,
};
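/*
 * The table index is _IOC_NR(cmd); the FW_CDEV_IOC_* request numbers in
 * linux/firewire-cdev.h are assigned so that their _IOC_NR values match
 * these slots, which dispatch_ioctl() relies on when indexing the table.
 */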

static int dispatch_ioctl(struct client *client,
			  unsigned int cmd, void __user *arg)
{
	union ioctl_arg buffer;
	int ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
	    _IOC_SIZE(cmd) > sizeof(buffer))
		return -EINVAL;

	if (_IOC_DIR(cmd) == _IOC_READ)
		memset(&buffer, 0, _IOC_SIZE(cmd));

	if (_IOC_DIR(cmd) & _IOC_WRITE)
		if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ)
		if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
			return -EFAULT;

	return ret;
}

static long fw_device_op_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
				      unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
				 page_count, direction);
	if (ret < 0)
		return ret;

	ret = fw_iso_buffer_map(&client->buffer, vma);
	if (ret < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return ret;
}
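/*
 * Direction above follows the mapping's writability: a writable mapping is
 * assumed to feed an iso transmit context (CPU fills the buffer, controller
 * reads it: DMA_TO_DEVICE), a read-only mapping a receive context.
 */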

static int is_outbound_transaction_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;

	return resource->release == release_transaction;
}

static int has_outbound_transactions(struct client *client)
{
	int ret;

	spin_lock_irq(&client->lock);
	ret = idr_for_each(&client->resource_idr,
			   is_outbound_transaction_resource, NULL);
	spin_unlock_irq(&client->lock);

	return ret;
}

static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;
	struct client *client = data;

	resource->release(client, resource);
	client_put(client);

	return 0;
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *event, *next_event;

	spin_lock_irq(&client->device->card->lock);
	list_del(&client->phy_receiver_link);
	spin_unlock_irq(&client->device->card->lock);

	mutex_lock(&client->device->client_list_mutex);
	list_del(&client->link);
	mutex_unlock(&client->device->client_list_mutex);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irq(&client->lock);
	client->in_shutdown = true;
	spin_unlock_irq(&client->lock);

	wait_event(client->tx_flush_wait, !has_outbound_transactions(client));

	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_remove_all(&client->resource_idr);
	idr_destroy(&client->resource_idr);

	list_for_each_entry_safe(event, next_event, &client->event_list, link)
		kfree(event);

	client_put(client);

	return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
{
	struct client *client = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= POLLHUP | POLLERR;
	if (!list_empty(&client->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.mmap		= fw_device_op_mmap,
	.release	= fw_device_op_release,
	.poll		= fw_device_op_poll,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};