/*
 * VME Bridge Framework
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

16 17
#include <linux/init.h>
#include <linux/export.h>
18 19 20 21 22 23 24 25 26 27 28 29
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/syscalls.h>
30
#include <linux/mutex.h>
31
#include <linux/spinlock.h>
32
#include <linux/slab.h>
33
#include <linux/vme.h>
34 35 36

#include "vme_bridge.h"

/* Bitmask and list of registered buses both protected by common mutex */
/* NOTE(review): presumably one bit per allocated bus number; the allocator
 * is not visible in this chunk — confirm against the registration code. */
static unsigned int vme_bus_numbers;
static LIST_HEAD(vme_bus_list);
static DEFINE_MUTEX(vme_buses_lock);

/* Forward declaration; the definition is outside this chunk. */
static int __init vme_init(void);
/* Map a generic struct device back to its enclosing struct vme_dev. */
static struct vme_dev *dev_to_vme_dev(struct device *dev)
{
	return container_of(dev, struct vme_dev, dev);
}

/*
 * Find the bridge that the resource is associated with.
 */
static struct vme_bridge *find_bridge(struct vme_resource *resource)
{
	/* Get list to search */
	switch (resource->type) {
	case VME_MASTER:
		return list_entry(resource->entry, struct vme_master_resource,
			list)->parent;
		break;
	case VME_SLAVE:
		return list_entry(resource->entry, struct vme_slave_resource,
			list)->parent;
		break;
	case VME_DMA:
		return list_entry(resource->entry, struct vme_dma_resource,
			list)->parent;
		break;
68 69 70 71
	case VME_LM:
		return list_entry(resource->entry, struct vme_lm_resource,
			list)->parent;
		break;
72 73 74 75 76 77 78 79 80 81 82
	default:
		printk(KERN_ERR "Unknown resource type\n");
		return NULL;
		break;
	}
}

/*
 * Allocate a contiguous block of memory for use by the driver. This is used to
 * create the buffers for the slave windows.
 *
 * Returns a kernel virtual address and fills *dma with the bus address, or
 * NULL on any failure. The actual allocation is delegated to the
 * bridge-specific alloc_consistent() callback.
 */
void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
	dma_addr_t *dma)
{
	struct vme_bridge *bridge;

	if (resource == NULL) {
		printk(KERN_ERR "No resource\n");
		return NULL;
	}

	bridge = find_bridge(resource);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find bridge\n");
		return NULL;
	}

	/* The bridge's parent device is passed to the low-level allocator */
	if (bridge->parent == NULL) {
		printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
		return NULL;
	}

	if (bridge->alloc_consistent == NULL) {
		printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
		       bridge->name);
		return NULL;
	}

	return bridge->alloc_consistent(bridge->parent, size, dma);
}
EXPORT_SYMBOL(vme_alloc_consistent);

/*
 * Free previously allocated contiguous block of memory.
 *
 * size, vaddr and dma must match the values used/returned by the matching
 * vme_alloc_consistent() call; freeing is delegated to the bridge-specific
 * free_consistent() callback.
 */
void vme_free_consistent(struct vme_resource *resource, size_t size,
	void *vaddr, dma_addr_t dma)
{
	struct vme_bridge *bridge;

	if (resource == NULL) {
		printk(KERN_ERR "No resource\n");
		return;
	}

	bridge = find_bridge(resource);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find bridge\n");
		return;
	}

	if (bridge->parent == NULL) {
		printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
		return;
	}

	if (bridge->free_consistent == NULL) {
		printk(KERN_ERR "free_consistent not supported by bridge %s\n",
		       bridge->name);
		return;
	}

	bridge->free_consistent(bridge->parent, size, vaddr, dma);
}
EXPORT_SYMBOL(vme_free_consistent);

/*
 * Return the size of the window backing the given resource.
 *
 * Master and slave windows are queried via their get() accessors; DMA
 * resources have no window and report 0. Any query failure also yields 0.
 */
size_t vme_get_size(struct vme_resource *resource)
{
	int enabled, retval;
	unsigned long long base, size;
	dma_addr_t buf_base;
	u32 aspace, cycle, dwidth;

	switch (resource->type) {
	case VME_MASTER:
		retval = vme_master_get(resource, &enabled, &base, &size,
			&aspace, &cycle, &dwidth);
		if (retval)
			return 0;

		return size;
		break;
	case VME_SLAVE:
		retval = vme_slave_get(resource, &enabled, &base, &size,
			&buf_base, &aspace, &cycle);
		if (retval)
			return 0;

		return size;
		break;
	case VME_DMA:
		/* DMA controllers have no associated window */
		return 0;
		break;
	default:
		printk(KERN_ERR "Unknown resource type\n");
		return 0;
		break;
	}
}
EXPORT_SYMBOL(vme_get_size);

D
Dmitry Kalinkin 已提交
183 184
int vme_check_window(u32 aspace, unsigned long long vme_base,
		     unsigned long long size)
185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204
{
	int retval = 0;

	switch (aspace) {
	case VME_A16:
		if (((vme_base + size) > VME_A16_MAX) ||
				(vme_base > VME_A16_MAX))
			retval = -EFAULT;
		break;
	case VME_A24:
		if (((vme_base + size) > VME_A24_MAX) ||
				(vme_base > VME_A24_MAX))
			retval = -EFAULT;
		break;
	case VME_A32:
		if (((vme_base + size) > VME_A32_MAX) ||
				(vme_base > VME_A32_MAX))
			retval = -EFAULT;
		break;
	case VME_A64:
205 206
		if ((size != 0) && (vme_base > U64_MAX + 1 - size))
			retval = -EFAULT;
207 208 209 210 211 212 213 214 215 216 217 218 219
		break;
	case VME_CRCSR:
		if (((vme_base + size) > VME_CRCSR_MAX) ||
				(vme_base > VME_CRCSR_MAX))
			retval = -EFAULT;
		break;
	case VME_USER1:
	case VME_USER2:
	case VME_USER3:
	case VME_USER4:
		/* User Defined */
		break;
	default:
220
		printk(KERN_ERR "Invalid address space\n");
221 222 223 224 225 226
		retval = -EINVAL;
		break;
	}

	return retval;
}
D
Dmitry Kalinkin 已提交
227
EXPORT_SYMBOL(vme_check_window);
228

229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261
static u32 vme_get_aspace(int am)
{
	switch (am) {
	case 0x29:
	case 0x2D:
		return VME_A16;
	case 0x38:
	case 0x39:
	case 0x3A:
	case 0x3B:
	case 0x3C:
	case 0x3D:
	case 0x3E:
	case 0x3F:
		return VME_A24;
	case 0x8:
	case 0x9:
	case 0xA:
	case 0xB:
	case 0xC:
	case 0xD:
	case 0xE:
	case 0xF:
		return VME_A32;
	case 0x0:
	case 0x1:
	case 0x3:
		return VME_A64;
	}

	return 0;
}

/*
 * Request a slave image with specific attributes, return some unique
 * identifier.
 *
 * Walks the bridge's slave resource list and claims the first image that is
 * unlocked and whose attribute masks cover the requested address space and
 * cycle types. Returns a heap-allocated vme_resource handle, or NULL if no
 * suitable image is free or allocation fails.
 */
struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
	u32 cycle)
{
	struct vme_bridge *bridge;
	struct list_head *slave_pos = NULL;
	struct vme_slave_resource *allocated_image = NULL;
	struct vme_slave_resource *slave_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through slave resources */
	list_for_each(slave_pos, &bridge->slave_resources) {
		slave_image = list_entry(slave_pos,
			struct vme_slave_resource, list);

		if (slave_image == NULL) {
			printk(KERN_ERR "Registered NULL Slave resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		mutex_lock(&slave_image->mtx);
		if (((slave_image->address_attr & address) == address) &&
			((slave_image->cycle_attr & cycle) == cycle) &&
			(slave_image->locked == 0)) {

			/* Claim the image under its mutex, then drop it */
			slave_image->locked = 1;
			mutex_unlock(&slave_image->mtx);
			allocated_image = slave_image;
			break;
		}
		mutex_unlock(&slave_image->mtx);
	}

	/* No free image */
	if (allocated_image == NULL)
		goto err_image;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_SLAVE;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&slave_image->mtx);
	slave_image->locked = 0;
	mutex_unlock(&slave_image->mtx);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_slave_request);

/*
 * Configure a slave window: enable state, VME base/size, backing buffer bus
 * address, address space and cycle types.
 *
 * The requested aspace/cycle must be a subset of what the image supports.
 * Returns 0 on success or a negative errno. Delegates the hardware
 * programming to the bridge's slave_set() callback.
 */
int vme_slave_set(struct vme_resource *resource, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t buf_base, u32 aspace, u32 cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_slave_resource *image;
	int retval;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_slave_resource, list);

	if (bridge->slave_set == NULL) {
		printk(KERN_ERR "Function not supported\n");
		/* NOTE(review): -ENOSYS here vs -EINVAL in the other
		 * "not supported" paths in this file — confirm intended */
		return -ENOSYS;
	}

	/* Requested attributes must be within the image's capabilities */
	if (!(((image->address_attr & aspace) == aspace) &&
		((image->cycle_attr & cycle) == cycle))) {
		printk(KERN_ERR "Invalid attributes\n");
		return -EINVAL;
	}

	retval = vme_check_window(aspace, vme_base, size);
	if (retval)
		return retval;

	return bridge->slave_set(image, enabled, vme_base, size, buf_base,
		aspace, cycle);
}
EXPORT_SYMBOL(vme_slave_set);

/*
 * Retrieve the current configuration of a slave window via the bridge's
 * slave_get() callback. All out-parameters are filled by the callback.
 * Returns 0 on success or a negative errno.
 */
int vme_slave_get(struct vme_resource *resource, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_slave_resource *image;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_slave_resource, list);

	if (bridge->slave_get == NULL) {
		printk(KERN_ERR "vme_slave_get not supported\n");
		return -EINVAL;
	}

	return bridge->slave_get(image, enabled, vme_base, size, buf_base,
		aspace, cycle);
}
EXPORT_SYMBOL(vme_slave_get);

/*
 * Release a slave window previously obtained with vme_slave_request():
 * clears the image's locked flag and frees the resource handle.
 */
void vme_slave_free(struct vme_resource *resource)
{
	struct vme_slave_resource *slave_image;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return;
	}

	slave_image = list_entry(resource->entry, struct vme_slave_resource,
		list);
	if (slave_image == NULL) {
		printk(KERN_ERR "Can't find slave resource\n");
		return;
	}

	/* Unlock image */
	mutex_lock(&slave_image->mtx);
	/* Double-free is logged but still proceeds to clear the flag */
	if (slave_image->locked == 0)
		printk(KERN_ERR "Image is already free\n");

	slave_image->locked = 0;
	mutex_unlock(&slave_image->mtx);

	/* Free up resource memory */
	kfree(resource);
}
EXPORT_SYMBOL(vme_slave_free);

/*
 * Request a master image with specific attributes, return some unique
 * identifier.
 *
 * Same claiming scheme as vme_slave_request(), but master images are
 * protected by a spinlock rather than a mutex and additionally match on
 * data width. Returns a heap-allocated vme_resource handle or NULL.
 */
struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
	u32 cycle, u32 dwidth)
{
	struct vme_bridge *bridge;
	struct list_head *master_pos = NULL;
	struct vme_master_resource *allocated_image = NULL;
	struct vme_master_resource *master_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through master resources */
	list_for_each(master_pos, &bridge->master_resources) {
		master_image = list_entry(master_pos,
			struct vme_master_resource, list);

		if (master_image == NULL) {
			printk(KERN_WARNING "Registered NULL master resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		spin_lock(&master_image->lock);
		if (((master_image->address_attr & address) == address) &&
			((master_image->cycle_attr & cycle) == cycle) &&
			((master_image->width_attr & dwidth) == dwidth) &&
			(master_image->locked == 0)) {

			/* Claim the image under its lock, then drop it */
			master_image->locked = 1;
			spin_unlock(&master_image->lock);
			allocated_image = master_image;
			break;
		}
		spin_unlock(&master_image->lock);
	}

	/* Check to see if we found a resource */
	if (allocated_image == NULL) {
		printk(KERN_ERR "Can't find a suitable resource\n");
		goto err_image;
	}

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_MASTER;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	spin_lock(&master_image->lock);
	master_image->locked = 0;
	spin_unlock(&master_image->lock);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_master_request);

/*
 * Configure a master window: enable state, VME base/size, address space,
 * cycle types and data width.
 *
 * The requested attributes must be a subset of the image's capabilities and
 * the window must fit the address space (vme_check_window). Hardware
 * programming is delegated to the bridge's master_set() callback.
 */
int vme_master_set(struct vme_resource *resource, int enabled,
	unsigned long long vme_base, unsigned long long size, u32 aspace,
	u32 cycle, u32 dwidth)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	int retval;

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	if (bridge->master_set == NULL) {
		printk(KERN_WARNING "vme_master_set not supported\n");
		return -EINVAL;
	}

	/* Requested attributes must be within the image's capabilities */
	if (!(((image->address_attr & aspace) == aspace) &&
		((image->cycle_attr & cycle) == cycle) &&
		((image->width_attr & dwidth) == dwidth))) {
		printk(KERN_WARNING "Invalid attributes\n");
		return -EINVAL;
	}

	retval = vme_check_window(aspace, vme_base, size);
	if (retval)
		return retval;

	return bridge->master_set(image, enabled, vme_base, size, aspace,
		cycle, dwidth);
}
EXPORT_SYMBOL(vme_master_set);

/*
 * Retrieve the current configuration of a master window via the bridge's
 * master_get() callback. All out-parameters are filled by the callback.
 * Returns 0 on success or a negative errno.
 */
int vme_master_get(struct vme_resource *resource, int *enabled,
	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
	u32 *cycle, u32 *dwidth)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	if (bridge->master_get == NULL) {
		printk(KERN_WARNING "%s not supported\n", __func__);
		return -EINVAL;
	}

	return bridge->master_get(image, enabled, vme_base, size, aspace,
		cycle, dwidth);
}
EXPORT_SYMBOL(vme_master_get);

/*
 * Read data out of VME space into a buffer.
 *
 * count is clamped so the transfer stays within the window; an offset past
 * the end of the window is rejected with -EFAULT. Returns the byte count
 * reported by the bridge's master_read() callback or a negative errno.
 */
ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
	loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	size_t length;

	if (bridge->master_read == NULL) {
		printk(KERN_WARNING "Reading from resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	length = vme_get_size(resource);

	if (offset > length) {
		printk(KERN_WARNING "Invalid Offset\n");
		return -EFAULT;
	}

	/* Clamp the transfer to the end of the window */
	if ((offset + count) > length)
		count = length - offset;

	return bridge->master_read(image, buf, count, offset);

}
EXPORT_SYMBOL(vme_master_read);

/*
 * Write data out to VME space from a buffer.
 *
 * Mirrors vme_master_read(): count is clamped to the window and an offset
 * past the end is rejected with -EFAULT. Returns the byte count from the
 * bridge's master_write() callback or a negative errno.
 */
ssize_t vme_master_write(struct vme_resource *resource, void *buf,
	size_t count, loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	size_t length;

	if (bridge->master_write == NULL) {
		printk(KERN_WARNING "Writing to resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	length = vme_get_size(resource);

	if (offset > length) {
		printk(KERN_WARNING "Invalid Offset\n");
		return -EFAULT;
	}

	/* Clamp the transfer to the end of the window */
	if ((offset + count) > length)
		count = length - offset;

	return bridge->master_write(image, buf, count, offset);
}
EXPORT_SYMBOL(vme_master_write);

/*
 * Perform RMW cycle to provided location.
 *
 * NOTE(review): the return type is unsigned int but the error paths return
 * -EINVAL, which callers see as a huge positive value — confirm callers
 * treat the result accordingly.
 */
unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
	unsigned int compare, unsigned int swap, loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;

	if (bridge->master_rmw == NULL) {
		printk(KERN_WARNING "Writing to resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	return bridge->master_rmw(image, mask, compare, swap, offset);
}
EXPORT_SYMBOL(vme_master_rmw);

/*
 * Memory-map a master window into user space.
 *
 * vm_pgoff selects an offset inside the window's bus resource; the mapping
 * is rejected if it would extend past the window. Pages are mapped
 * non-cached since they target device/bus space.
 */
int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
{
	struct vme_master_resource *image;
	phys_addr_t phys_addr;
	unsigned long vma_size;

	if (resource->type != VME_MASTER) {
		pr_err("Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);
	phys_addr = image->bus_resource.start + (vma->vm_pgoff << PAGE_SHIFT);
	vma_size = vma->vm_end - vma->vm_start;

	/* resource.end is inclusive, hence the +1 */
	if (phys_addr + vma_size > image->bus_resource.end + 1) {
		pr_err("Map size cannot exceed the window size\n");
		return -EFAULT;
	}

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return vm_iomap_memory(vma, phys_addr, vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(vme_master_mmap);

/*
 * Release a master window previously obtained with vme_master_request():
 * clears the image's locked flag (under its spinlock) and frees the
 * resource handle.
 */
void vme_master_free(struct vme_resource *resource)
{
	struct vme_master_resource *master_image;

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return;
	}

	master_image = list_entry(resource->entry, struct vme_master_resource,
		list);
	if (master_image == NULL) {
		printk(KERN_ERR "Can't find master resource\n");
		return;
	}

	/* Unlock image */
	spin_lock(&master_image->lock);
	/* Double-free is logged but still proceeds to clear the flag */
	if (master_image->locked == 0)
		printk(KERN_ERR "Image is already free\n");

	master_image->locked = 0;
	spin_unlock(&master_image->lock);

	/* Free up resource memory */
	kfree(resource);
}
EXPORT_SYMBOL(vme_master_free);

/*
 * Request a DMA controller with specific attributes, return some unique
 * identifier.
 *
 * Claims the first unlocked controller whose route attributes cover the
 * requested routing. Returns a heap-allocated vme_resource handle or NULL.
 */
struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
{
	struct vme_bridge *bridge;
	struct list_head *dma_pos = NULL;
	struct vme_dma_resource *allocated_ctrlr = NULL;
	struct vme_dma_resource *dma_ctrlr = NULL;
	struct vme_resource *resource = NULL;

	/* XXX Not checking resource attributes */
	printk(KERN_ERR "No VME resource Attribute tests done\n");

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through DMA resources */
	list_for_each(dma_pos, &bridge->dma_resources) {
		dma_ctrlr = list_entry(dma_pos,
			struct vme_dma_resource, list);

		if (dma_ctrlr == NULL) {
			printk(KERN_ERR "Registered NULL DMA resource\n");
			continue;
		}

		/* Find an unlocked and compatible controller */
		mutex_lock(&dma_ctrlr->mtx);
		if (((dma_ctrlr->route_attr & route) == route) &&
			(dma_ctrlr->locked == 0)) {

			/* Claim the controller under its mutex */
			dma_ctrlr->locked = 1;
			mutex_unlock(&dma_ctrlr->mtx);
			allocated_ctrlr = dma_ctrlr;
			break;
		}
		mutex_unlock(&dma_ctrlr->mtx);
	}

	/* Check to see if we found a resource */
	if (allocated_ctrlr == NULL)
		goto err_ctrlr;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_DMA;
	resource->entry = &allocated_ctrlr->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&dma_ctrlr->mtx);
	dma_ctrlr->locked = 0;
	mutex_unlock(&dma_ctrlr->mtx);
err_ctrlr:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_request);

/*
 * Start new list
 *
 * Allocate an empty DMA link list bound to the resource's controller.
 * The caller owns the returned list and releases it with
 * vme_dma_list_free(). Returns NULL on bad resource type or allocation
 * failure.
 */
struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
{
	struct vme_dma_resource *ctrlr;
	struct vme_dma_list *dma_list;

	if (resource->type != VME_DMA) {
		printk(KERN_ERR "Not a DMA resource\n");
		return NULL;
	}

	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);

	dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
	if (dma_list == NULL) {
		printk(KERN_ERR "Unable to allocate memory for new DMA list\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_list->entries);
	dma_list->parent = ctrlr;
	mutex_init(&dma_list->mtx);

	return dma_list;
}
EXPORT_SYMBOL(vme_new_dma_list);

/*
 * Create "Pattern" type attributes
 *
 * Allocates a vme_dma_attr wrapping a vme_dma_pattern payload; free with
 * vme_dma_free_attribute(). Returns NULL on allocation failure.
 */
struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
{
	struct vme_dma_attr *attributes;
	struct vme_dma_pattern *pattern_attr;

	attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
	if (attributes == NULL) {
		printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
		goto err_attr;
	}

	pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
	if (pattern_attr == NULL) {
		printk(KERN_ERR "Unable to allocate memory for pattern attributes\n");
		goto err_pat;
	}

	attributes->type = VME_DMA_PATTERN;
	attributes->private = (void *)pattern_attr;

	pattern_attr->pattern = pattern;
	pattern_attr->type = type;

	return attributes;

err_pat:
	kfree(attributes);
err_attr:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_pattern_attribute);

/*
 * Create "PCI" type attributes
 *
 * Allocates a vme_dma_attr wrapping a vme_dma_pci payload holding the bus
 * address; free with vme_dma_free_attribute(). Returns NULL on allocation
 * failure.
 */
struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
{
	struct vme_dma_pci *pci_attr;
	struct vme_dma_attr *attr;

	/* XXX Run some sanity checks here */

	attr = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
	if (attr == NULL) {
		printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
		return NULL;
	}

	pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
	if (pci_attr == NULL) {
		printk(KERN_ERR "Unable to allocate memory for PCI attributes\n");
		kfree(attr);
		return NULL;
	}

	pci_attr->address = address;

	attr->type = VME_DMA_PCI;
	attr->private = (void *)pci_attr;

	return attr;
}
EXPORT_SYMBOL(vme_dma_pci_attribute);

/*
 * Create "VME" type attributes
 *
 * Allocates a vme_dma_attr wrapping a vme_dma_vme payload describing a VME
 * endpoint (address, address space, cycle types, data width); free with
 * vme_dma_free_attribute(). Returns NULL on allocation failure.
 */
struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
	u32 aspace, u32 cycle, u32 dwidth)
{
	struct vme_dma_vme *vme_attr;
	struct vme_dma_attr *attr;

	attr = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
	if (attr == NULL) {
		printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
		return NULL;
	}

	vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
	if (vme_attr == NULL) {
		printk(KERN_ERR "Unable to allocate memory for VME attributes\n");
		kfree(attr);
		return NULL;
	}

	vme_attr->address = address;
	vme_attr->aspace = aspace;
	vme_attr->cycle = cycle;
	vme_attr->dwidth = dwidth;

	attr->type = VME_DMA_VME;
	attr->private = (void *)vme_attr;

	return attr;
}
EXPORT_SYMBOL(vme_dma_vme_attribute);

/*
 * Free attribute
 *
 * Releases both the type-specific payload and the wrapper allocated by one
 * of the vme_dma_*_attribute() constructors.
 */
void vme_dma_free_attribute(struct vme_dma_attr *attributes)
{
	kfree(attributes->private);
	kfree(attributes);
}
EXPORT_SYMBOL(vme_dma_free_attribute);

/*
 * Append a src->dest transfer of count bytes to a DMA link list via the
 * bridge's dma_list_add() callback.
 *
 * mutex_trylock is used so that a list currently being built or executed
 * elsewhere is rejected with -EINVAL rather than blocking.
 */
int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
	struct vme_dma_attr *dest, size_t count)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_add == NULL) {
		printk(KERN_WARNING "Link List DMA generation not supported\n");
		return -EINVAL;
	}

	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List already submitted\n");
		return -EINVAL;
	}

	retval = bridge->dma_list_add(list, src, dest, count);

	mutex_unlock(&list->mtx);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_add);

/*
 * Execute a previously built DMA link list via the bridge's
 * dma_list_exec() callback. Holds the list mutex for the duration, so
 * concurrent add/free attempts (which use trylock) fail cleanly.
 */
int vme_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_exec == NULL) {
		printk(KERN_ERR "Link List DMA execution not supported\n");
		return -EINVAL;
	}

	mutex_lock(&list->mtx);

	retval = bridge->dma_list_exec(list);

	mutex_unlock(&list->mtx);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_exec);

/*
 * Empty and free a DMA link list.
 *
 * A list that is currently in use (mutex held elsewhere) is rejected with
 * -EINVAL. The entries themselves are driver specific, so emptying is
 * delegated to the bridge's dma_list_empty() callback; the list structure
 * is only freed if that succeeds.
 */
int vme_dma_list_free(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_empty == NULL) {
		printk(KERN_WARNING "Emptying of Link Lists not supported\n");
		return -EINVAL;
	}

	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List in use\n");
		return -EINVAL;
	}

	/*
	 * Empty out all of the entries from the DMA list. We need to go to the
	 * low level driver as DMA entries are driver specific.
	 */
	retval = bridge->dma_list_empty(list);
	if (retval) {
		printk(KERN_ERR "Unable to empty link-list entries\n");
		mutex_unlock(&list->mtx);
		return retval;
	}
	mutex_unlock(&list->mtx);
	kfree(list);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_free);

/*
 * Release a DMA controller previously obtained with vme_dma_request().
 *
 * Fails with -EBUSY if the controller mutex is held elsewhere or if
 * transfers are still pending/running; otherwise clears the locked flag
 * and frees the resource handle.
 */
int vme_dma_free(struct vme_resource *resource)
{
	struct vme_dma_resource *ctrlr;

	if (resource->type != VME_DMA) {
		printk(KERN_ERR "Not a DMA resource\n");
		return -EINVAL;
	}

	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);

	if (!mutex_trylock(&ctrlr->mtx)) {
		printk(KERN_ERR "Resource busy, can't free\n");
		return -EBUSY;
	}

	if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
		printk(KERN_WARNING "Resource still processing transfers\n");
		mutex_unlock(&ctrlr->mtx);
		return -EBUSY;
	}

	ctrlr->locked = 0;

	mutex_unlock(&ctrlr->mtx);

	kfree(resource);

	return 0;
}
EXPORT_SYMBOL(vme_dma_free);

/*
 * Dispatch a VME bus error to every registered handler whose address space
 * and [start, end) range cover the faulting address.
 *
 * The first error in a handler's window is latched in first_error; the
 * error count saturates at UINT_MAX. If no handler matches, the error is
 * reported via dev_err().
 */
void vme_bus_error_handler(struct vme_bridge *bridge,
			   unsigned long long address, int am)
{
	struct list_head *handler_pos = NULL;
	struct vme_error_handler *handler;
	int handler_triggered = 0;
	u32 aspace = vme_get_aspace(am);

	list_for_each(handler_pos, &bridge->vme_error_handlers) {
		handler = list_entry(handler_pos, struct vme_error_handler,
				     list);
		if ((aspace == handler->aspace) &&
		    (address >= handler->start) &&
		    (address < handler->end)) {
			if (!handler->num_errors)
				handler->first_error = address;
			if (handler->num_errors != UINT_MAX)
				handler->num_errors++;
			handler_triggered = 1;
		}
	}

	if (!handler_triggered)
		dev_err(bridge->parent,
			"Unhandled VME access error at address 0x%llx\n",
			address);
}
EXPORT_SYMBOL(vme_bus_error_handler);

/*
 * Register an error handler covering [address, address + len) in the given
 * address space and attach it to the bridge's handler list.
 *
 * Returns the handler (caller releases it with
 * vme_unregister_error_handler()) or NULL on allocation failure.
 * NOTE(review): no locking is taken around the list update here — confirm
 * the caller serialises against vme_bus_error_handler().
 */
struct vme_error_handler *vme_register_error_handler(
	struct vme_bridge *bridge, u32 aspace,
	unsigned long long address, size_t len)
{
	struct vme_error_handler *handler;

	handler = kmalloc(sizeof(*handler), GFP_KERNEL);
	if (!handler)
		return NULL;

	handler->aspace = aspace;
	handler->start = address;
	handler->end = address + len;
	handler->num_errors = 0;
	handler->first_error = 0;
	list_add_tail(&handler->list, &bridge->vme_error_handlers);

	return handler;
}
EXPORT_SYMBOL(vme_register_error_handler);
1078

/* Detach a handler registered with vme_register_error_handler() and free it. */
void vme_unregister_error_handler(struct vme_error_handler *handler)
{
	list_del(&handler->list);
	kfree(handler);
}
EXPORT_SYMBOL(vme_unregister_error_handler);
1085

/*
 * Dispatch a VME interrupt at the given level/status-ID to the callback
 * registered via vme_irq_request(); log a warning if none is registered.
 */
void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
{
	void (*call)(int, int, void *);
	void *priv_data;

	/* irq[] is indexed 0..6 for VME levels 1..7 */
	call = bridge->irq[level - 1].callback[statid].func;
	priv_data = bridge->irq[level - 1].callback[statid].priv_data;

	if (call != NULL)
		call(level, statid, priv_data);
	else
		printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n",
		       level, statid);
}
EXPORT_SYMBOL(vme_irq_handler);

/*
 * Register a callback for VME interrupt level (1..7) and status/ID vector.
 *
 * Fails with -EBUSY if the level/statid pair already has a callback. The
 * interrupt level is enabled in hardware on the first registration for
 * that level. Returns 0 on success or a negative errno.
 */
int vme_irq_request(struct vme_dev *vdev, int level, int statid,
	void (*callback)(int, int, void *),
	void *priv_data)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return -EINVAL;
	}

	if (bridge->irq_set == NULL) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return -EINVAL;
	}

	mutex_lock(&bridge->irq_mtx);

	if (bridge->irq[level - 1].callback[statid].func) {
		mutex_unlock(&bridge->irq_mtx);
		printk(KERN_WARNING "VME Interrupt already taken\n");
		return -EBUSY;
	}

	/* count tracks how many consumers share this level */
	bridge->irq[level - 1].count++;
	bridge->irq[level - 1].callback[statid].priv_data = priv_data;
	bridge->irq[level - 1].callback[statid].func = callback;

	/* Enable IRQ level */
	bridge->irq_set(bridge, level, 1, 1);

	mutex_unlock(&bridge->irq_mtx);

	return 0;
}
EXPORT_SYMBOL(vme_irq_request);
1144

1145
/**
 * vme_irq_free - detach a handler installed with vme_irq_request().
 * @vdev: VME device that owns the handler.
 * @level: VME interrupt level; must be in the range 1-7.
 * @statid: status/ID (vector) within that level.
 *
 * Clears the callback slot under the bridge's irq mutex and disables the
 * interrupt level once no handlers remain attached to it.
 */
void vme_irq_free(struct vme_dev *vdev, int level, int statid)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return;
	}

	if (bridge->irq_set == NULL) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return;
	}

	mutex_lock(&bridge->irq_mtx);

	bridge->irq[level - 1].count--;

	/* Disable IRQ level if no more interrupts attached at this level*/
	if (bridge->irq[level - 1].count == 0)
		bridge->irq_set(bridge, level, 0, 1);

	bridge->irq[level - 1].callback[statid].func = NULL;
	bridge->irq[level - 1].callback[statid].priv_data = NULL;

	mutex_unlock(&bridge->irq_mtx);
}
EXPORT_SYMBOL(vme_irq_free);
1179

1180
int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
1181 1182 1183
{
	struct vme_bridge *bridge;

1184
	bridge = vdev->bridge;
1185 1186 1187 1188 1189
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

1190
	if ((level < 1) || (level > 7)) {
1191 1192 1193 1194
		printk(KERN_WARNING "Invalid interrupt level\n");
		return -EINVAL;
	}

1195
	if (bridge->irq_generate == NULL) {
1196
		printk(KERN_WARNING "Interrupt generation not supported\n");
1197 1198 1199
		return -EINVAL;
	}

1200
	return bridge->irq_generate(bridge, level, statid);
1201
}
1202
EXPORT_SYMBOL(vme_irq_generate);
1203

1204 1205 1206
/*
 * Request the location monitor, return resource or NULL
 */
1207
struct vme_resource *vme_lm_request(struct vme_dev *vdev)
1208 1209
{
	struct vme_bridge *bridge;
1210 1211 1212 1213
	struct list_head *lm_pos = NULL;
	struct vme_lm_resource *allocated_lm = NULL;
	struct vme_lm_resource *lm = NULL;
	struct vme_resource *resource = NULL;
1214

1215
	bridge = vdev->bridge;
1216 1217
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
1218 1219 1220 1221
		goto err_bus;
	}

	/* Loop through DMA resources */
1222
	list_for_each(lm_pos, &bridge->lm_resources) {
1223 1224 1225 1226
		lm = list_entry(lm_pos,
			struct vme_lm_resource, list);

		if (lm == NULL) {
1227
			printk(KERN_ERR "Registered NULL Location Monitor resource\n");
1228 1229 1230 1231
			continue;
		}

		/* Find an unlocked controller */
1232
		mutex_lock(&lm->mtx);
1233 1234
		if (lm->locked == 0) {
			lm->locked = 1;
1235
			mutex_unlock(&lm->mtx);
1236 1237 1238
			allocated_lm = lm;
			break;
		}
1239
		mutex_unlock(&lm->mtx);
1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251
	}

	/* Check to see if we found a resource */
	if (allocated_lm == NULL)
		goto err_lm;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_LM;
1252
	resource->entry = &allocated_lm->list;
1253 1254 1255 1256 1257

	return resource;

err_alloc:
	/* Unlock image */
1258
	mutex_lock(&lm->mtx);
1259
	lm->locked = 0;
1260
	mutex_unlock(&lm->mtx);
1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282
err_lm:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_lm_request);

/*
 * vme_lm_count - number of monitors provided by a location monitor
 * resource.  Returns -EINVAL if @resource is not of type VME_LM.
 */
int vme_lm_count(struct vme_resource *resource)
{
	struct vme_lm_resource *lm_res;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm_res = list_entry(resource->entry, struct vme_lm_resource, list);

	return lm_res->monitors;
}
EXPORT_SYMBOL(vme_lm_count);

/*
 * vme_lm_set - configure a location monitor's base address, address space
 * and cycle types via the bridge driver.  Returns -EINVAL on a wrong
 * resource type or missing bridge support, otherwise the driver's result.
 */
int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
	u32 aspace, u32 cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm_res;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	if (!bridge->lm_set) {
		printk(KERN_ERR "vme_lm_set not supported\n");
		return -EINVAL;
	}

	lm_res = list_entry(resource->entry, struct vme_lm_resource, list);

	return bridge->lm_set(lm_res, lm_base, aspace, cycle);
}
EXPORT_SYMBOL(vme_lm_set);

1304
/*
 * vme_lm_get - read back a location monitor's current base address,
 * address space and cycle configuration via the bridge driver.  Returns
 * -EINVAL on a wrong resource type or missing bridge support, otherwise
 * the driver's result.
 */
int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
	u32 *aspace, u32 *cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm_res;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	if (!bridge->lm_get) {
		printk(KERN_ERR "vme_lm_get not supported\n");
		return -EINVAL;
	}

	lm_res = list_entry(resource->entry, struct vme_lm_resource, list);

	return bridge->lm_get(lm_res, lm_base, aspace, cycle);
}
EXPORT_SYMBOL(vme_lm_get);

1326
int vme_lm_attach(struct vme_resource *resource, int monitor,
1327
	void (*callback)(void *), void *data)
1328
{
1329 1330
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;
1331

1332 1333
	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
1334 1335 1336
		return -EINVAL;
	}

1337 1338
	lm = list_entry(resource->entry, struct vme_lm_resource, list);

1339
	if (bridge->lm_attach == NULL) {
1340
		printk(KERN_ERR "vme_lm_attach not supported\n");
1341 1342 1343
		return -EINVAL;
	}

1344
	return bridge->lm_attach(lm, monitor, callback, data);
1345 1346 1347
}
EXPORT_SYMBOL(vme_lm_attach);

1348
int vme_lm_detach(struct vme_resource *resource, int monitor)
1349
{
1350 1351
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;
1352

1353 1354
	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
1355 1356 1357
		return -EINVAL;
	}

1358 1359
	lm = list_entry(resource->entry, struct vme_lm_resource, list);

1360
	if (bridge->lm_detach == NULL) {
1361
		printk(KERN_ERR "vme_lm_detach not supported\n");
1362 1363 1364
		return -EINVAL;
	}

1365
	return bridge->lm_detach(lm, monitor);
1366 1367 1368
}
EXPORT_SYMBOL(vme_lm_detach);

1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379
/**
 * vme_lm_free - release a location monitor obtained with vme_lm_request().
 * @resource: location monitor resource to release.
 *
 * Marks the underlying monitor as unlocked so it can be requested again
 * and frees the wrapper structure.  Does nothing (beyond logging) if the
 * resource is not of type VME_LM.
 */
void vme_lm_free(struct vme_resource *resource)
{
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	mutex_lock(&lm->mtx);

	/* XXX
	 * Check to see that there aren't any callbacks still attached, if
	 * there are we should probably be detaching them!
	 */

	lm->locked = 0;

	mutex_unlock(&lm->mtx);

	kfree(resource);
}
EXPORT_SYMBOL(vme_lm_free);

1395
int vme_slot_num(struct vme_dev *vdev)
1396 1397 1398
{
	struct vme_bridge *bridge;

1399
	bridge = vdev->bridge;
1400 1401 1402 1403 1404 1405
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	if (bridge->slot_get == NULL) {
1406
		printk(KERN_WARNING "vme_slot_num not supported\n");
1407 1408 1409
		return -EINVAL;
	}

1410
	return bridge->slot_get(bridge);
1411
}
1412
EXPORT_SYMBOL(vme_slot_num);
1413

1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426
/*
 * vme_bus_num - report the bus number assigned to the device's bridge.
 * Returns -EINVAL if the device has no bridge.
 */
int vme_bus_num(struct vme_dev *vdev)
{
	struct vme_bridge *bridge = vdev->bridge;

	if (!bridge) {
		pr_err("Can't find VME bus\n");
		return -EINVAL;
	}

	return bridge->num;
}
EXPORT_SYMBOL(vme_bus_num);
1427 1428 1429

/* - Bridge Registration --------------------------------------------------- */

1430 1431 1432 1433 1434
/*
 * Driver-core release callback: frees the struct vme_dev embedding @dev
 * once its last reference is dropped.
 */
static void vme_dev_release(struct device *dev)
{
	kfree(dev_to_vme_dev(dev));
}

1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448
/*
 * Common bridge initialization: set up the per-bridge resource lists
 * (error handlers, master/slave windows, DMA, location monitors) and the
 * interrupt mutex.  Returns @bridge so the call can be chained.
 */
struct vme_bridge *vme_init_bridge(struct vme_bridge *bridge)
{
	INIT_LIST_HEAD(&bridge->vme_error_handlers);
	INIT_LIST_HEAD(&bridge->master_resources);
	INIT_LIST_HEAD(&bridge->slave_resources);
	INIT_LIST_HEAD(&bridge->dma_resources);
	INIT_LIST_HEAD(&bridge->lm_resources);
	mutex_init(&bridge->irq_mtx);

	return bridge;
}
EXPORT_SYMBOL(vme_init_bridge);

1449
int vme_register_bridge(struct vme_bridge *bridge)
1450 1451
{
	int i;
1452
	int ret = -1;
1453

1454
	mutex_lock(&vme_buses_lock);
1455
	for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
1456 1457 1458
		if ((vme_bus_numbers & (1 << i)) == 0) {
			vme_bus_numbers |= (1 << i);
			bridge->num = i;
1459
			INIT_LIST_HEAD(&bridge->devices);
1460 1461
			list_add_tail(&bridge->bus_list, &vme_bus_list);
			ret = 0;
1462 1463 1464
			break;
		}
	}
1465
	mutex_unlock(&vme_buses_lock);
1466

1467
	return ret;
1468
}
1469
EXPORT_SYMBOL(vme_register_bridge);
1470

1471
/**
 * vme_unregister_bridge - remove a bridge registered with the VME core.
 * @bridge: bridge to remove.
 *
 * Releases the bridge's bus number back to the bitmask, unregisters every
 * device that was created on the bridge and drops the bridge from the
 * global bus list.  All of this happens under vme_buses_lock so driver
 * registration cannot race with bridge removal.
 */
void vme_unregister_bridge(struct vme_bridge *bridge)
{
	struct vme_dev *vdev;
	struct vme_dev *tmp;

	mutex_lock(&vme_buses_lock);
	vme_bus_numbers &= ~(1 << bridge->num);
	list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
		list_del(&vdev->drv_list);
		list_del(&vdev->bridge_list);
		device_unregister(&vdev->dev);
	}
	list_del(&bridge->bus_list);
	mutex_unlock(&vme_buses_lock);
}
EXPORT_SYMBOL(vme_unregister_bridge);
1487

1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501
/* - Driver Registration --------------------------------------------------- */

static int __vme_register_driver_bus(struct vme_driver *drv,
	struct vme_bridge *bridge, unsigned int ndevs)
{
	int err;
	unsigned int i;
	struct vme_dev *vdev;
	struct vme_dev *tmp;

	for (i = 0; i < ndevs; i++) {
		vdev = kzalloc(sizeof(struct vme_dev), GFP_KERNEL);
		if (!vdev) {
			err = -ENOMEM;
1502 1503
			goto err_devalloc;
		}
1504
		vdev->num = i;
1505
		vdev->bridge = bridge;
1506 1507
		vdev->dev.platform_data = drv;
		vdev->dev.release = vme_dev_release;
1508 1509
		vdev->dev.parent = bridge->parent;
		vdev->dev.bus = &vme_bus_type;
1510 1511
		dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
			vdev->num);
1512

1513 1514
		err = device_register(&vdev->dev);
		if (err)
1515 1516
			goto err_reg;

1517 1518 1519 1520 1521 1522 1523
		if (vdev->dev.platform_data) {
			list_add_tail(&vdev->drv_list, &drv->devices);
			list_add_tail(&vdev->bridge_list, &bridge->devices);
		} else
			device_unregister(&vdev->dev);
	}
	return 0;
1524 1525

err_reg:
1526
	put_device(&vdev->dev);
1527
	kfree(vdev);
1528
err_devalloc:
1529 1530 1531
	list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
		list_del(&vdev->drv_list);
		list_del(&vdev->bridge_list);
1532
		device_unregister(&vdev->dev);
1533
	}
1534
	return err;
1535 1536
}

1537
/*
 * __vme_register_driver - create @ndevs devices for @drv on every
 * registered bridge.  Stops and returns the error of the first bridge
 * that fails; the caller is responsible for unregistering the driver in
 * that case.
 */
static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
{
	struct vme_bridge *bridge;
	int err = 0;

	mutex_lock(&vme_buses_lock);
	list_for_each_entry(bridge, &vme_bus_list, bus_list) {
		/*
		 * This cannot cause trouble as we already have vme_buses_lock
		 * and if the bridge is removed, it will have to go through
		 * vme_unregister_bridge() to do it (which calls remove() on
		 * the bridge which in turn tries to acquire vme_buses_lock and
		 * will have to wait).
		 */
		err = __vme_register_driver_bus(drv, bridge, ndevs);
		if (err)
			break;
	}
	mutex_unlock(&vme_buses_lock);
	return err;
}

1559
int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
1560
{
1561 1562
	int err;

1563 1564
	drv->driver.name = drv->name;
	drv->driver.bus = &vme_bus_type;
1565 1566 1567 1568 1569
	INIT_LIST_HEAD(&drv->devices);

	err = driver_register(&drv->driver);
	if (err)
		return err;
1570

1571 1572 1573 1574 1575
	err = __vme_register_driver(drv, ndevs);
	if (err)
		driver_unregister(&drv->driver);

	return err;
1576 1577 1578
}
EXPORT_SYMBOL(vme_register_driver);

1579
void vme_unregister_driver(struct vme_driver *drv)
1580
{
1581 1582 1583 1584 1585 1586 1587 1588 1589 1590
	struct vme_dev *dev, *dev_tmp;

	mutex_lock(&vme_buses_lock);
	list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
		list_del(&dev->drv_list);
		list_del(&dev->bridge_list);
		device_unregister(&dev->dev);
	}
	mutex_unlock(&vme_buses_lock);

1591 1592 1593 1594 1595 1596 1597 1598
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(vme_unregister_driver);

/* - Bus Registration ------------------------------------------------------ */

static int vme_bus_match(struct device *dev, struct device_driver *drv)
{
1599
	struct vme_driver *vme_drv;
1600

1601
	vme_drv = container_of(drv, struct vme_driver, driver);
1602

1603 1604
	if (dev->platform_data == vme_drv) {
		struct vme_dev *vdev = dev_to_vme_dev(dev);
1605

1606 1607
		if (vme_drv->match && vme_drv->match(vdev))
			return 1;
1608

1609
		dev->platform_data = NULL;
1610 1611 1612 1613 1614 1615 1616
	}
	return 0;
}

static int vme_bus_probe(struct device *dev)
{
	int retval = -ENODEV;
1617 1618
	struct vme_driver *driver;
	struct vme_dev *vdev = dev_to_vme_dev(dev);
1619

1620
	driver = dev->platform_data;
1621

1622
	if (driver->probe != NULL)
1623
		retval = driver->probe(vdev);
1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634

	return retval;
}

/* The VME bus type: matches and probes VME devices against VME drivers. */
struct bus_type vme_bus_type = {
	.name = "vme",
	.match = vme_bus_match,
	.probe = vme_bus_probe,
};
EXPORT_SYMBOL(vme_bus_type);

1635
/* Register the VME bus type with the driver core early in boot. */
static int __init vme_init(void)
{
	return bus_register(&vme_bus_type);
}
subsys_initcall(vme_init);