/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include <asm/system.h>

#include "core.h"

/*
 * ABI version history is documented in linux/firewire-cdev.h.
 */
#define FW_CDEV_KERNEL_VERSION			4
#define FW_CDEV_VERSION_EVENT_REQUEST2		4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END	4

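/*
 * Per-open state of a /dev/fw* file: an event queue for read(), an idr of
 * allocated resources, and at most one isochronous context per client.
 */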
struct client {
	u32 version;
	struct fw_device *device;

	spinlock_t lock;
	bool in_shutdown;
	struct idr resource_idr;
	struct list_head event_list;
	wait_queue_head_t wait;
	wait_queue_head_t tx_flush_wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;

	struct list_head phy_receiver_link;
	u64 phy_receiver_closure;

	struct list_head link;
	struct kref kref;
};

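/*
 * A client is reference-counted; events in flight and idr entries each
 * hold a reference (see add_client_resource() and complete_transaction()).
 */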
static inline void client_get(struct client *client)
{
	kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
	struct client *client = container_of(kref, struct client, kref);

	fw_device_put(client->device);
	kfree(client);
}

static void client_put(struct client *client)
{
	kref_put(&client->kref, client_release);
}

struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);
struct client_resource {
	client_resource_release_fn_t release;
	int handle;
};

struct address_handler_resource {
	struct client_resource resource;
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
};

struct outbound_transaction_resource {
	struct client_resource resource;
	struct fw_transaction transaction;
};

struct inbound_transaction_resource {
	struct client_resource resource;
	struct fw_card *card;
	struct fw_request *request;
	void *data;
	size_t length;
};

struct descriptor_resource {
	struct client_resource resource;
	struct fw_descriptor descriptor;
	u32 data[0];
};

struct iso_resource {
	struct client_resource resource;
	struct client *client;
	/* Schedule work and access todo only with client->lock held. */
	struct delayed_work work;
	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
	int generation;
	u64 channels;
	s32 bandwidth;
	struct iso_resource_event *e_alloc, *e_dealloc;
};

static void release_iso_resource(struct client *, struct client_resource *);

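/*
 * Scheduling iso resource work takes a client reference; it is dropped
 * again immediately if the work item was already queued.
 */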
static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
	client_get(r->client);
	if (!queue_delayed_work(fw_workqueue, &r->work, delay))
		client_put(r->client);
}

static void schedule_if_iso_resource(struct client_resource *resource)
{
	if (resource->release == release_iso_resource)
		schedule_iso_resource(container_of(resource,
					struct iso_resource, resource), 0);
}

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct bus_reset_event {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
	struct event event;
	struct client *client;
	struct outbound_transaction_resource r;
	struct fw_cdev_event_response response;
};

struct inbound_transaction_event {
	struct event event;
	union {
		struct fw_cdev_event_request request;
		struct fw_cdev_event_request2 request2;
	} req;
};

struct iso_interrupt_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_interrupt_mc_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt_mc interrupt;
};

struct iso_resource_event {
	struct event event;
	struct fw_cdev_event_iso_resource iso_resource;
};

struct outbound_phy_packet_event {
	struct event event;
	struct client *client;
	struct fw_packet p;
	struct fw_cdev_event_phy_packet phy_packet;
};

struct inbound_phy_packet_event {
	struct event event;
	struct fw_cdev_event_phy_packet phy_packet;
};

static inline void __user *u64_to_uptr(__u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline __u64 uptr_to_u64(void __user *ptr)
{
	return (__u64)(unsigned long)ptr;
}

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = fw_device_get_by_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);
		return -ENOMEM;
	}

	client->device = device;
	spin_lock_init(&client->lock);
	idr_init(&client->resource_idr);
	INIT_LIST_HEAD(&client->event_list);
	init_waitqueue_head(&client->wait);
	init_waitqueue_head(&client->tx_flush_wait);
	INIT_LIST_HEAD(&client->phy_receiver_link);
	kref_init(&client->kref);

	file->private_data = client;

	mutex_lock(&device->client_list_mutex);
	list_add_tail(&client->link, &device->client_list);
	mutex_unlock(&device->client_list_mutex);

	return nonseekable_open(inode, file);
}

static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		kfree(event);
	else
		list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}

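/*
 * Blocks until an event is queued or the device is shut down, then copies
 * the event's two data fragments to the read(2) buffer and frees it.
 */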
static int dequeue_event(struct client *client,
			 char __user *buffer, size_t count)
{
	struct event *event;
	size_t size, total;
	int i, ret;

	ret = wait_event_interruptible(client->wait,
			!list_empty(&client->event_list) ||
			fw_device_is_shutdown(client->device));
	if (ret < 0)
		return ret;

	if (list_empty(&client->event_list) &&
		       fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irq(&client->lock);
	event = list_first_entry(&client->event_list, struct event, link);
	list_del(&event->link);
	spin_unlock_irq(&client->lock);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			ret = -EFAULT;
			goto out;
		}
		total += size;
	}
	ret = total;

 out:
	kfree(event);

	return ret;
}

static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
				 struct client *client)
{
	struct fw_card *card = client->device->card;

	spin_lock_irq(&card->lock);

	event->closure	     = client->bus_reset_closure;
	event->type          = FW_CDEV_EVENT_BUS_RESET;
	event->generation    = client->device->generation;
	event->node_id       = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id    = card->bm_node_id;
	event->irm_node_id   = card->irm_node->node_id;
	event->root_node_id  = card->root_node->node_id;

	spin_unlock_irq(&card->lock);
}

static void for_each_client(struct fw_device *device,
			    void (*callback)(struct client *client))
{
	struct client *c;

	mutex_lock(&device->client_list_mutex);
	list_for_each_entry(c, &device->client_list, link)
		callback(c);
	mutex_unlock(&device->client_list_mutex);
}

static int schedule_reallocations(int id, void *p, void *data)
{
	schedule_if_iso_resource(p);

	return 0;
}

371
static void queue_bus_reset_event(struct client *client)
372
{
373
	struct bus_reset_event *e;
374

375 376
	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL) {
		fw_notify("Out of memory when allocating event\n");
		return;
	}

	fill_bus_reset_event(&e->reset, client);

	queue_event(client, &e->event,
		    &e->reset, sizeof(e->reset), NULL, 0);

	spin_lock_irq(&client->lock);
	idr_for_each(&client->resource_idr, schedule_reallocations, client);
	spin_unlock_irq(&client->lock);
389 390 391 392
}

void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}

union ioctl_arg {
	struct fw_cdev_get_info			get_info;
	struct fw_cdev_send_request		send_request;
	struct fw_cdev_allocate			allocate;
	struct fw_cdev_deallocate		deallocate;
	struct fw_cdev_send_response		send_response;
	struct fw_cdev_initiate_bus_reset	initiate_bus_reset;
	struct fw_cdev_add_descriptor		add_descriptor;
	struct fw_cdev_remove_descriptor	remove_descriptor;
	struct fw_cdev_create_iso_context	create_iso_context;
	struct fw_cdev_queue_iso		queue_iso;
	struct fw_cdev_start_iso		start_iso;
	struct fw_cdev_stop_iso			stop_iso;
	struct fw_cdev_get_cycle_timer		get_cycle_timer;
	struct fw_cdev_allocate_iso_resource	allocate_iso_resource;
	struct fw_cdev_send_stream_packet	send_stream_packet;
	struct fw_cdev_get_cycle_timer2		get_cycle_timer2;
	struct fw_cdev_send_phy_packet		send_phy_packet;
	struct fw_cdev_receive_phy_packets	receive_phy_packets;
	struct fw_cdev_set_iso_channels		set_iso_channels;
};

static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_info *a = &arg->get_info;
	struct fw_cdev_event_bus_reset bus_reset;
	unsigned long ret = 0;

	client->version = a->version;
	a->version = FW_CDEV_KERNEL_VERSION;
	a->card = client->device->card->index;

	down_read(&fw_device_rwsem);

	if (a->rom != 0) {
		size_t want = a->rom_length;
		size_t have = client->device->config_rom_length * 4;

		ret = copy_to_user(u64_to_uptr(a->rom),
				   client->device->config_rom, min(want, have));
	}
	a->rom_length = client->device->config_rom_length * 4;

	up_read(&fw_device_rwsem);

	if (ret != 0)
		return -EFAULT;

	client->bus_reset_closure = a->bus_reset_closure;
	if (a->bus_reset != 0) {
		fill_bus_reset_event(&bus_reset, client);
		if (copy_to_user(u64_to_uptr(a->bus_reset),
				 &bus_reset, sizeof(bus_reset)))
			return -EFAULT;
	}

	return 0;
}

static int add_client_resource(struct client *client,
			       struct client_resource *resource, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

 retry:
	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
		return -ENOMEM;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		ret = -ECANCELED;
	else
		ret = idr_get_new(&client->resource_idr, resource,
				  &resource->handle);
	if (ret >= 0) {
		client_get(client);
		schedule_if_iso_resource(resource);
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (ret == -EAGAIN)
		goto retry;

	return ret < 0 ? ret : 0;
}

static int release_client_resource(struct client *client, u32 handle,
				   client_resource_release_fn_t release,
				   struct client_resource **return_resource)
{
	struct client_resource *resource;

	spin_lock_irq(&client->lock);
	if (client->in_shutdown)
		resource = NULL;
	else
		resource = idr_find(&client->resource_idr, handle);
	if (resource && resource->release == release)
		idr_remove(&client->resource_idr, handle);
	spin_unlock_irq(&client->lock);

	if (!(resource && resource->release == release))
		return -EINVAL;

	if (return_resource)
		*return_resource = resource;
	else
		resource->release(client, resource);

	client_put(client);

	return 0;
}

static void release_transaction(struct client *client,
				struct client_resource *resource)
{
}

static void complete_transaction(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct outbound_transaction_event *e = data;
	struct fw_cdev_event_response *rsp = &e->response;
	struct client *client = e->client;
	unsigned long flags;

	if (length < rsp->length)
		rsp->length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(rsp->data, payload, rsp->length);

	spin_lock_irqsave(&client->lock, flags);
	idr_remove(&client->resource_idr, e->r.resource.handle);
	if (client->in_shutdown)
		wake_up(&client->tx_flush_wait);
	spin_unlock_irqrestore(&client->lock, flags);

	rsp->type = FW_CDEV_EVENT_RESPONSE;
	rsp->rcode = rcode;

	/*
	 * In the case that sizeof(*rsp) doesn't align with the position of the
	 * data, and the read is short, preserve an extra copy of the data
	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
	 * for short reads and some apps depended on it, this is both safe
	 * and prudent for compatibility.
	 */
	if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
		queue_event(client, &e->event, rsp, sizeof(*rsp),
			    rsp->data, rsp->length);
	else
		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
			    NULL, 0);

	/* Drop the idr's reference */
	client_put(client);
}

static int init_request(struct client *client,
			struct fw_cdev_send_request *request,
			int destination_id, int speed)
{
	struct outbound_transaction_event *e;
	int ret;

	if (request->tcode != TCODE_STREAM_DATA &&
	    (request->length > 4096 || request->length > 512 << speed))
		return -EIO;

	if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
	    request->length < 4)
		return -EINVAL;

	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	e->client = client;
	e->response.length = request->length;
	e->response.closure = request->closure;

	if (request->data &&
	    copy_from_user(e->response.data,
			   u64_to_uptr(request->data), request->length)) {
		ret = -EFAULT;
		goto failed;
	}

	e->r.resource.release = release_transaction;
	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
	if (ret < 0)
		goto failed;

	fw_send_request(client->device->card, &e->r.transaction,
			request->tcode, destination_id, request->generation,
			speed, request->offset, e->response.data,
			request->length, complete_transaction, e);
	return 0;

 failed:
	kfree(e);

	return ret;
}

static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
	switch (arg->send_request.tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_MASK_SWAP:
	case TCODE_LOCK_COMPARE_SWAP:
	case TCODE_LOCK_FETCH_ADD:
	case TCODE_LOCK_LITTLE_ADD:
	case TCODE_LOCK_BOUNDED_ADD:
	case TCODE_LOCK_WRAP_ADD:
	case TCODE_LOCK_VENDOR_DEPENDENT:
		break;
	default:
		return -EINVAL;
	}

	return init_request(client, &arg->send_request, client->device->node_id,
			    client->device->max_speed);
}

static inline bool is_fcp_request(struct fw_request *request)
{
	return request == NULL;
}

static void release_request(struct client *client,
			    struct client_resource *resource)
{
	struct inbound_transaction_resource *r = container_of(resource,
			struct inbound_transaction_resource, resource);

	if (is_fcp_request(r->request))
		kfree(r->data);
	else
		fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);

	fw_card_put(r->card);
	kfree(r);
}

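/*
 * Inbound request dispatcher.  FCP requests arrive with request == NULL;
 * their payload is duplicated because the core does not keep it around.
 */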
static void handle_request(struct fw_card *card, struct fw_request *request,
			   int tcode, int destination, int source,
			   int generation, unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
{
	struct address_handler_resource *handler = callback_data;
	struct inbound_transaction_resource *r;
	struct inbound_transaction_event *e;
	size_t event_size0;
	void *fcp_frame = NULL;
	int ret;

	/* card may be different from handler->client->device->card */
	fw_card_get(card);

	r = kmalloc(sizeof(*r), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (r == NULL || e == NULL) {
		fw_notify("Out of memory when allocating event\n");
		goto failed;
	}
	r->card    = card;
	r->request = request;
	r->data    = payload;
	r->length  = length;

	if (is_fcp_request(request)) {
		/*
		 * FIXME: Let core-transaction.c manage a
		 * single reference-counted copy?
		 */
		fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
		if (fcp_frame == NULL)
			goto failed;

		r->data = fcp_frame;
	}

	r->resource.release = release_request;
	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
	if (ret < 0)
		goto failed;

	if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
		struct fw_cdev_event_request *req = &e->req.request;

		if (tcode & 0x10)
			tcode = TCODE_LOCK_REQUEST;

		req->type	= FW_CDEV_EVENT_REQUEST;
		req->tcode	= tcode;
		req->offset	= offset;
		req->length	= length;
		req->handle	= r->resource.handle;
		req->closure	= handler->closure;
		event_size0	= sizeof(*req);
	} else {
		struct fw_cdev_event_request2 *req = &e->req.request2;

		req->type	= FW_CDEV_EVENT_REQUEST2;
		req->tcode	= tcode;
		req->offset	= offset;
		req->source_node_id = source;
		req->destination_node_id = destination;
		req->card	= card->index;
		req->generation	= generation;
		req->length	= length;
		req->handle	= r->resource.handle;
		req->closure	= handler->closure;
		event_size0	= sizeof(*req);
	}

	queue_event(handler->client, &e->event,
		    &e->req, event_size0, r->data, length);
	return;

 failed:
	kfree(r);
	kfree(e);
	kfree(fcp_frame);

	if (!is_fcp_request(request))
		fw_send_response(card, request, RCODE_CONFLICT_ERROR);

	fw_card_put(card);
}

static void release_address_handler(struct client *client,
				    struct client_resource *resource)
{
	struct address_handler_resource *r =
	    container_of(resource, struct address_handler_resource, resource);

	fw_core_remove_address_handler(&r->handler);
	kfree(r);
}

static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_allocate *a = &arg->allocate;
	struct address_handler_resource *r;
	struct fw_address_region region;
	int ret;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	region.start = a->offset;
	if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
		region.end = a->offset + a->length;
	else
		region.end = a->region_end;

	r->handler.length           = a->length;
	r->handler.address_callback = handle_request;
	r->handler.callback_data    = r;
	r->closure   = a->closure;
	r->client    = client;

	ret = fw_core_add_address_handler(&r->handler, &region);
	if (ret < 0) {
		kfree(r);
		return ret;
	}
	a->offset = r->handler.offset;

	r->resource.release = release_address_handler;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &r->resource);
		return ret;
	}
	a->handle = r->resource.handle;

	return 0;
}

static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->deallocate.handle,
				       release_address_handler, NULL);
}

static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_response *a = &arg->send_response;
	struct client_resource *resource;
	struct inbound_transaction_resource *r;
	int ret = 0;

	if (release_client_resource(client, a->handle,
				    release_request, &resource) < 0)
		return -EINVAL;

	r = container_of(resource, struct inbound_transaction_resource,
			 resource);
	if (is_fcp_request(r->request))
		goto out;

	if (a->length != fw_get_response_length(r->request)) {
		ret = -EINVAL;
		kfree(r->request);
		goto out;
	}
	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
		ret = -EFAULT;
		kfree(r->request);
		goto out;
	}
	fw_send_response(r->card, r->request, a->rcode);
 out:
	fw_card_put(r->card);
	kfree(r);

	return ret;
}

static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
	fw_schedule_bus_reset(client->device->card, true,
			arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
	return 0;
}

static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor_resource *r =
		container_of(resource, struct descriptor_resource, resource);

	fw_core_remove_descriptor(&r->descriptor);
	kfree(r);
}

static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
	struct descriptor_resource *r;
	int ret;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	if (a->length > 256)
		return -EINVAL;

	r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
		ret = -EFAULT;
		goto failed;
	}

	r->descriptor.length    = a->length;
	r->descriptor.immediate = a->immediate;
	r->descriptor.key       = a->key;
	r->descriptor.data      = r->data;

	ret = fw_core_add_descriptor(&r->descriptor);
	if (ret < 0)
		goto failed;

	r->resource.release = release_descriptor;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&r->descriptor);
		goto failed;
	}
	a->handle = r->resource.handle;

	return 0;
 failed:
	kfree(r);

	return ret;
}

static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->remove_descriptor.handle,
				       release_descriptor, NULL);
}

static void iso_callback(struct fw_iso_context *context, u32 cycle,
			 size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt_event *e;

	e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
	if (e == NULL) {
		fw_notify("Out of memory when allocating event\n");
		return;
	}
	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
	e->interrupt.closure   = client->iso_closure;
	e->interrupt.cycle     = cycle;
	e->interrupt.header_length = header_length;
	memcpy(e->interrupt.header, header, header_length);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt) + header_length, NULL, 0);
}

static void iso_mc_callback(struct fw_iso_context *context,
			    dma_addr_t completed, void *data)
{
	struct client *client = data;
	struct iso_interrupt_mc_event *e;

	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (e == NULL) {
		fw_notify("Out of memory when allocating event\n");
		return;
	}
	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
	e->interrupt.closure   = client->iso_closure;
	e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
						      completed);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt), NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
	struct fw_iso_context *context;
	fw_iso_callback_t cb;

	BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE  != FW_ISO_CONTEXT_RECEIVE  ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
					FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);

	switch (a->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		if (a->speed > SCODE_3200 || a->channel > 63)
			return -EINVAL;

		cb = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		if (a->header_size < 4 || (a->header_size & 3) ||
		    a->channel > 63)
			return -EINVAL;

		cb = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		cb = (fw_iso_callback_t)iso_mc_callback;
		break;

	default:
		return -EINVAL;
	}

	context = fw_iso_context_create(client->device->card, a->type,
			a->channel, a->speed, a->header_size, cb, client);
	if (IS_ERR(context))
		return PTR_ERR(context);

	/* We only support one context at this time. */
	spin_lock_irq(&client->lock);
	if (client->iso_context != NULL) {
		spin_unlock_irq(&client->lock);
		fw_iso_context_destroy(context);
		return -EBUSY;
	}
	client->iso_closure = a->closure;
	client->iso_context = context;
	spin_unlock_irq(&client->lock);

	a->handle = 0;

	return 0;
}

static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
	struct fw_iso_context *ctx = client->iso_context;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_set_channels(ctx, &a->channels);
}

/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)

static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_queue_iso *a = &arg->queue_iso;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, transmit_header_bytes = 0;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we setup the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the a->data pointer is ignored.
	 */
	payload = (unsigned long)a->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (a->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
		return -EINVAL;

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
	if (!access_ok(VERIFY_READ, p, a->size))
		return -EFAULT;

	end = (void __user *)p + a->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		switch (ctx->type) {
		case FW_ISO_CONTEXT_TRANSMIT:
			if (u.packet.header_length & 3)
				return -EINVAL;
			transmit_header_bytes = u.packet.header_length;
			break;

		case FW_ISO_CONTEXT_RECEIVE:
			if (u.packet.header_length == 0 ||
			    u.packet.header_length % ctx->header_size != 0)
				return -EINVAL;
			break;

		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
			if (u.packet.payload_length == 0 ||
			    u.packet.payload_length & 3)
				return -EINVAL;
			break;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[transmit_header_bytes / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
		    (u.packet.header, p->header, transmit_header_bytes))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}
	fw_iso_context_queue_flush(ctx);

	a->size    -= uptr_to_u64(p) - a->packets;
	a->packets  = uptr_to_u64(p);
	a->data     = client->vm_start + payload;

	return count;
}

static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_start_iso *a = &arg->start_iso;

	BUILD_BUG_ON(
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
	    (a->tags == 0 || a->tags > 15 || a->sync > 15))
		return -EINVAL;

	return fw_iso_context_start(client->iso_context,
				    a->cycle, a->sync, a->tags);
}

static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_stop_iso *a = &arg->stop_iso;

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}

static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
	struct fw_card *card = client->device->card;
	struct timespec ts = {0, 0};
	u32 cycle_time;
	int ret = 0;

	local_irq_disable();

	cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);

	switch (a->clk_id) {
	case CLOCK_REALTIME:      getnstimeofday(&ts);                   break;
	case CLOCK_MONOTONIC:     do_posix_clock_monotonic_gettime(&ts); break;
	case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts);                  break;
	default:
		ret = -EINVAL;
	}

	local_irq_enable();

	a->tv_sec      = ts.tv_sec;
	a->tv_nsec     = ts.tv_nsec;
	a->cycle_timer = cycle_time;

	return ret;
}

static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
	struct fw_cdev_get_cycle_timer2 ct2;

	ct2.clk_id = CLOCK_REALTIME;
	ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);

	a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
	a->cycle_timer = ct2.cycle_timer;

	return 0;
}

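/*
 * Worker for channel/bandwidth allocation.  todo advances from ISO_RES_ALLOC
 * to ISO_RES_REALLOC after success; failures and the *_ONCE variants free
 * the resource after queueing the corresponding event.
 */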
static void iso_resource_work(struct work_struct *work)
{
	struct iso_resource_event *e;
	struct iso_resource *r =
			container_of(work, struct iso_resource, work.work);
	struct client *client = r->client;
	int generation, channel, bandwidth, todo;
	bool skip, free, success;

	spin_lock_irq(&client->lock);
	generation = client->device->generation;
	todo = r->todo;
	/* Allow 1000ms grace period for other reallocations. */
	if (todo == ISO_RES_ALLOC &&
	    time_before64(get_jiffies_64(),
			  client->device->card->reset_jiffies + HZ)) {
		schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
		skip = true;
	} else {
		/* We could be called twice within the same generation. */
		skip = todo == ISO_RES_REALLOC &&
		       r->generation == generation;
	}
	free = todo == ISO_RES_DEALLOC ||
	       todo == ISO_RES_ALLOC_ONCE ||
	       todo == ISO_RES_DEALLOC_ONCE;
	r->generation = generation;
	spin_unlock_irq(&client->lock);

	if (skip)
		goto out;

	bandwidth = r->bandwidth;

	fw_iso_resource_manage(client->device->card, generation,
			r->channels, &channel, &bandwidth,
			todo == ISO_RES_ALLOC ||
			todo == ISO_RES_REALLOC ||
			todo == ISO_RES_ALLOC_ONCE);
	/*
	 * Is this generation outdated already?  As long as this resource sticks
	 * in the idr, it will be scheduled again for a newer generation or at
	 * shutdown.
	 */
	if (channel == -EAGAIN &&
	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
		goto out;

	success = channel >= 0 || bandwidth > 0;

	spin_lock_irq(&client->lock);
	/*
	 * Transit from allocation to reallocation, except if the client
	 * requested deallocation in the meantime.
	 */
	if (r->todo == ISO_RES_ALLOC)
		r->todo = ISO_RES_REALLOC;
	/*
	 * Allocation or reallocation failure?  Pull this resource out of the
	 * idr and prepare for deletion, unless the client is shutting down.
	 */
	if (r->todo == ISO_RES_REALLOC && !success &&
	    !client->in_shutdown &&
	    idr_find(&client->resource_idr, r->resource.handle)) {
		idr_remove(&client->resource_idr, r->resource.handle);
		client_put(client);
		free = true;
	}
	spin_unlock_irq(&client->lock);

	if (todo == ISO_RES_ALLOC && channel >= 0)
		r->channels = 1ULL << channel;

	if (todo == ISO_RES_REALLOC && success)
		goto out;

	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
		e = r->e_alloc;
		r->e_alloc = NULL;
	} else {
		e = r->e_dealloc;
		r->e_dealloc = NULL;
	}
	e->iso_resource.handle    = r->resource.handle;
	e->iso_resource.channel   = channel;
	e->iso_resource.bandwidth = bandwidth;

	queue_event(client, &e->event,
		    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);

	if (free) {
		cancel_delayed_work(&r->work);
		kfree(r->e_alloc);
		kfree(r->e_dealloc);
		kfree(r);
	}
 out:
	client_put(client);
}

static void release_iso_resource(struct client *client,
				 struct client_resource *resource)
{
	struct iso_resource *r =
		container_of(resource, struct iso_resource, resource);

	spin_lock_irq(&client->lock);
	r->todo = ISO_RES_DEALLOC;
	schedule_iso_resource(r, 0);
	spin_unlock_irq(&client->lock);
}

static int init_iso_resource(struct client *client,
		struct fw_cdev_allocate_iso_resource *request, int todo)
{
	struct iso_resource_event *e1, *e2;
	struct iso_resource *r;
	int ret;

	if ((request->channels == 0 && request->bandwidth == 0) ||
	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
	    request->bandwidth < 0)
		return -EINVAL;

	r  = kmalloc(sizeof(*r), GFP_KERNEL);
	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
	if (r == NULL || e1 == NULL || e2 == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_DELAYED_WORK(&r->work, iso_resource_work);
	r->client	= client;
	r->todo		= todo;
	r->generation	= -1;
	r->channels	= request->channels;
	r->bandwidth	= request->bandwidth;
	r->e_alloc	= e1;
	r->e_dealloc	= e2;

	e1->iso_resource.closure = request->closure;
	e1->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
	e2->iso_resource.closure = request->closure;
	e2->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

	if (todo == ISO_RES_ALLOC) {
		r->resource.release = release_iso_resource;
		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
		if (ret < 0)
			goto fail;
	} else {
		r->resource.release = NULL;
		r->resource.handle = -1;
		schedule_iso_resource(r, 0);
	}
	request->handle = r->resource.handle;

	return 0;
 fail:
	kfree(r);
	kfree(e1);
	kfree(e2);

	return ret;
}

static int ioctl_allocate_iso_resource(struct client *client,
				       union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC);
}

static int ioctl_deallocate_iso_resource(struct client *client,
					 union ioctl_arg *arg)
{
	return release_client_resource(client,
			arg->deallocate.handle, release_iso_resource, NULL);
}

static int ioctl_allocate_iso_resource_once(struct client *client,
					    union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}

static int ioctl_deallocate_iso_resource_once(struct client *client,
					      union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}

/*
 * Returns a speed code:  Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
	return client->device->max_speed;
}

static int ioctl_send_broadcast_request(struct client *client,
					union ioctl_arg *arg)
{
	struct fw_cdev_send_request *a = &arg->send_request;

	switch (a->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		break;
	default:
		return -EINVAL;
	}

	/* Security policy: Only allow accesses to Units Space. */
	if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
		return -EACCES;

	return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}

static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
	struct fw_cdev_send_request request;
	int dest;

	if (a->speed > client->device->card->link_speed ||
	    a->length > 1024 << a->speed)
		return -EIO;

	if (a->tag > 3 || a->channel > 63 || a->sy > 15)
		return -EINVAL;

	dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
	request.tcode		= TCODE_STREAM_DATA;
	request.length		= a->length;
	request.closure		= a->closure;
	request.data		= a->data;
	request.generation	= a->generation;

	return init_request(client, &request, dest, a->speed);
}

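/*
 * Translates the ack status of a transmitted PHY packet into the rcode
 * reported by the FW_CDEV_EVENT_PHY_PACKET_SENT event.
 */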
static void outbound_phy_packet_callback(struct fw_packet *packet,
					 struct fw_card *card, int status)
{
	struct outbound_phy_packet_event *e =
		container_of(packet, struct outbound_phy_packet_event, p);

	switch (status) {
	/* expected: */
	case ACK_COMPLETE:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
	/* should never happen with PHY packets: */
	case ACK_PENDING:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:	e->phy_packet.rcode = RCODE_BUSY;	break;
	case ACK_DATA_ERROR:	e->phy_packet.rcode = RCODE_DATA_ERROR;	break;
	case ACK_TYPE_ERROR:	e->phy_packet.rcode = RCODE_TYPE_ERROR;	break;
	/* stale generation; cancelled; on certain controllers: no ack */
	default:		e->phy_packet.rcode = status;		break;
	}
	e->phy_packet.data[0] = packet->timestamp;

	queue_event(e->client, &e->event, &e->phy_packet,
		    sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
	client_put(e->client);
}

static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
	struct fw_card *card = client->device->card;
	struct outbound_phy_packet_event *e;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	client_get(client);
	e->client		= client;
	e->p.speed		= SCODE_100;
	e->p.generation		= a->generation;
	e->p.header[0]		= TCODE_LINK_INTERNAL << 4;
	e->p.header[1]		= a->data[0];
	e->p.header[2]		= a->data[1];
	e->p.header_length	= 12;
	e->p.callback		= outbound_phy_packet_callback;
	e->phy_packet.closure	= a->closure;
	e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_SENT;
	if (is_ping_packet(a->data))
			e->phy_packet.length = 4;

	card->driver->send_request(card, &e->p);

	return 0;
}

static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
	struct fw_card *card = client->device->card;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	spin_lock_irq(&card->lock);

	list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
	client->phy_receiver_closure = a->closure;

	spin_unlock_irq(&card->lock);

	return 0;
}

void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
	struct client *client;
	struct inbound_phy_packet_event *e;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
		e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
		if (e == NULL) {
			fw_notify("Out of memory when allocating event\n");
			break;
		}
		e->phy_packet.closure	= client->phy_receiver_closure;
		e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
		e->phy_packet.rcode	= RCODE_COMPLETE;
		e->phy_packet.length	= 8;
		e->phy_packet.data[0]	= p->header[1];
		e->phy_packet.data[1]	= p->header[2];
		queue_event(client, &e->event,
			    &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
	}

	spin_unlock_irqrestore(&card->lock, flags);
}

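/*
 * The table index must match _IOC_NR() of the ioctl numbers defined in
 * linux/firewire-cdev.h; dispatch_ioctl() below relies on this ordering.
 */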
static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
	[0x00] = ioctl_get_info,
	[0x01] = ioctl_send_request,
	[0x02] = ioctl_allocate,
	[0x03] = ioctl_deallocate,
	[0x04] = ioctl_send_response,
	[0x05] = ioctl_initiate_bus_reset,
	[0x06] = ioctl_add_descriptor,
	[0x07] = ioctl_remove_descriptor,
	[0x08] = ioctl_create_iso_context,
	[0x09] = ioctl_queue_iso,
	[0x0a] = ioctl_start_iso,
	[0x0b] = ioctl_stop_iso,
	[0x0c] = ioctl_get_cycle_timer,
	[0x0d] = ioctl_allocate_iso_resource,
	[0x0e] = ioctl_deallocate_iso_resource,
	[0x0f] = ioctl_allocate_iso_resource_once,
	[0x10] = ioctl_deallocate_iso_resource_once,
	[0x11] = ioctl_get_speed,
	[0x12] = ioctl_send_broadcast_request,
	[0x13] = ioctl_send_stream_packet,
	[0x14] = ioctl_get_cycle_timer2,
	[0x15] = ioctl_send_phy_packet,
	[0x16] = ioctl_receive_phy_packets,
	[0x17] = ioctl_set_iso_channels,
};

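/*
 * Common ioctl entry point: the argument is staged in a kernel buffer
 * according to _IOC_SIZE()/_IOC_DIR() and copied back out for reads.
 */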
static int dispatch_ioctl(struct client *client,
			  unsigned int cmd, void __user *arg)
{
	union ioctl_arg buffer;
	int ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
	    _IOC_SIZE(cmd) > sizeof(buffer))
		return -EINVAL;

	if (_IOC_DIR(cmd) == _IOC_READ)
		memset(&buffer, 0, _IOC_SIZE(cmd));

	if (_IOC_DIR(cmd) & _IOC_WRITE)
		if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ)
		if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
			return -EFAULT;

	return ret;
}

static long fw_device_op_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
				      unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
				 page_count, direction);
	if (ret < 0)
		return ret;

	ret = fw_iso_buffer_map(&client->buffer, vma);
	if (ret < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return ret;
}

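/*
 * Outbound transactions are flushed before shutdown: fw_device_op_release()
 * waits on tx_flush_wait until no such resources remain in the idr.
 */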
static int is_outbound_transaction_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;

	return resource->release == release_transaction;
}

static int has_outbound_transactions(struct client *client)
{
	int ret;

	spin_lock_irq(&client->lock);
	ret = idr_for_each(&client->resource_idr,
			   is_outbound_transaction_resource, NULL);
	spin_unlock_irq(&client->lock);

	return ret;
}

static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;
	struct client *client = data;

	resource->release(client, resource);
	client_put(client);

	return 0;
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *event, *next_event;

	spin_lock_irq(&client->device->card->lock);
	list_del(&client->phy_receiver_link);
	spin_unlock_irq(&client->device->card->lock);

	mutex_lock(&client->device->client_list_mutex);
	list_del(&client->link);
	mutex_unlock(&client->device->client_list_mutex);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irq(&client->lock);
	client->in_shutdown = true;
	spin_unlock_irq(&client->lock);

	wait_event(client->tx_flush_wait, !has_outbound_transactions(client));

	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_remove_all(&client->resource_idr);
	idr_destroy(&client->resource_idr);

	list_for_each_entry_safe(event, next_event, &client->event_list, link)
		kfree(event);

	client_put(client);

	return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
{
	struct client *client = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= POLLHUP | POLLERR;
	if (!list_empty(&client->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.mmap		= fw_device_op_mmap,
	.release	= fw_device_op_release,
	.poll		= fw_device_op_poll,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};