/*
 * Broadcom specific AMBA
 * Bus subsystem
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/bcma/bcma.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");

/* contains the number the next bus should get. */
static unsigned int bcma_bus_next_num = 0;

/* bcma_buses_mutex locks the bcma_bus_next_num */
static DEFINE_MUTEX(bcma_buses_mutex);

/* Forward declarations for the bcma_bus_type callbacks defined below. */
static int bcma_bus_match(struct device *dev, struct device_driver *drv);
static int bcma_device_probe(struct device *dev);
static int bcma_device_remove(struct device *dev);
static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);

/* sysfs: show the core's manufacturer ID in hex. */
static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *bdev = container_of(dev, struct bcma_device, dev);

	return sprintf(buf, "0x%03X\n", bdev->id.manuf);
}
static DEVICE_ATTR_RO(manuf);

40 41 42 43 44
static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%03X\n", core->id.id);
}
45 46
static DEVICE_ATTR_RO(id);

47 48 49 50 51
static ssize_t rev_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%02X\n", core->id.rev);
}
52 53
static DEVICE_ATTR_RO(rev);

54 55 56 57 58
static ssize_t class_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%X\n", core->id.class);
}
59 60 61 62 63 64 65 66
static DEVICE_ATTR_RO(class);

/* Per-core sysfs attributes exposing the core identification fields. */
static struct attribute *bcma_device_attrs[] = {
	&dev_attr_manuf.attr,
	&dev_attr_id.attr,
	&dev_attr_rev.attr,
	&dev_attr_class.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bcma_device);
/* The bcma bus type: ties match/probe/remove/uevent and sysfs attrs together. */
static struct bus_type bcma_bus_type = {
	.name		= "bcma",
	.match		= bcma_bus_match,
	.probe		= bcma_device_probe,
	.remove		= bcma_device_remove,
	.uevent		= bcma_device_uevent,
	.dev_groups	= bcma_device_groups,
};

79 80 81 82 83 84 85
static u16 bcma_cc_core_id(struct bcma_bus *bus)
{
	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
		return BCMA_CORE_4706_CHIPCOMMON;
	return BCMA_CORE_CHIPCOMMON;
}

86 87
struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
					u8 unit)
88 89 90 91 92 93 94 95 96
{
	struct bcma_device *core;

	list_for_each_entry(core, &bus->cores, list) {
		if (core->id.id == coreid && core->core_unit == unit)
			return core;
	}
	return NULL;
}
97
EXPORT_SYMBOL_GPL(bcma_find_core_unit);
98

R
Rafał Miłecki 已提交
99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117
bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
		     int timeout)
{
	unsigned long deadline = jiffies + timeout;
	u32 val;

	do {
		val = bcma_read32(core, reg);
		if ((val & mask) == value)
			return true;
		cpu_relax();
		udelay(10);
	} while (!time_after_eq(jiffies, deadline));

	bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);

	return false;
}

/*
 * struct device release callback for a core: unmap its MMIO windows
 * (core and wrapper, when mapped) and free the core structure itself.
 */
static void bcma_release_core_dev(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	if (core->io_addr)
		iounmap(core->io_addr);
	if (core->io_wrap)
		iounmap(core->io_wrap);
	kfree(core);
}

128 129 130 131 132 133 134 135 136 137 138
static bool bcma_is_core_needed_early(u16 core_id)
{
	switch (core_id) {
	case BCMA_CORE_NS_NAND:
	case BCMA_CORE_NS_QSPI:
		return true;
	}

	return false;
}

/*
 * Find the device tree node (a child of the hosting platform device's
 * node) whose translated "reg" address matches the core's base address.
 * Returns NULL when there is no DT info or no matching child.
 */
static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
						     struct bcma_device *core)
{
	struct device_node *node;
	u64 size;
	const __be32 *reg;

	if (!parent || !parent->dev.of_node)
		return NULL;

	for_each_child_of_node(parent->dev.of_node, node) {
		reg = of_get_address(node, 0, &size, NULL);
		if (!reg)
			continue;
		if (of_translate_address(node, reg) == core->addr)
			return node;
	}
	return NULL;
}

/*
 * Resolve IRQ @num of a core into @out_irq.
 * If the core has its own DT node, try parsing its interrupt spec first;
 * otherwise fall back to parsing raw IRQ data relative to the parent
 * node, using the core's base address as the unit address.
 */
static int bcma_of_irq_parse(struct platform_device *parent,
			     struct bcma_device *core,
			     struct of_phandle_args *out_irq, int num)
{
	__be32 laddr[1];
	int rc;

	if (core->dev.of_node) {
		rc = of_irq_parse_one(core->dev.of_node, num, out_irq);
		if (!rc)
			return rc;
	}

	/* Fall back: build a one-cell interrupt spec on the parent node */
	out_irq->np = parent->dev.of_node;
	out_irq->args_count = 1;
	out_irq->args[0] = num;

	laddr[0] = cpu_to_be32(core->addr);
	return of_irq_parse_raw(laddr, out_irq);
}

/*
 * Map IRQ @num of a core to a Linux IRQ number via the device tree.
 * Returns 0 when OF IRQ support is compiled out, there is no DT info,
 * or parsing/mapping fails.
 */
static unsigned int bcma_of_get_irq(struct platform_device *parent,
				    struct bcma_device *core, int num)
{
	struct of_phandle_args out_irq;
	int ret;

	if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
		return 0;

	ret = bcma_of_irq_parse(parent, core, &out_irq, num);
	if (ret) {
		bcma_debug(core->bus, "bcma_of_get_irq() failed with rc=%d\n",
			   ret);
		return 0;
	}

	return irq_create_of_mapping(&out_irq);
}

199 200 201 202 203
static void bcma_of_fill_device(struct platform_device *parent,
				struct bcma_device *core)
{
	struct device_node *node;

204 205 206
	if (!IS_ENABLED(CONFIG_OF_IRQ))
		return;

207 208 209
	node = bcma_of_find_child_device(parent, core);
	if (node)
		core->dev.of_node = node;
H
Hauke Mehrtens 已提交
210 211

	core->irq = bcma_of_get_irq(parent, core, 0);
212 213
}

/*
 * Return the Linux IRQ number for IRQ @num of a core, depending on the
 * host type: PCI-hosted buses use the PCI device's IRQ; SoC-hosted
 * buses ask the MIPS core (for IRQ 0) or the device tree; SDIO has no
 * IRQ support here. Returns 0 when no IRQ can be determined.
 */
unsigned int bcma_core_irq(struct bcma_device *core, int num)
{
	struct bcma_bus *bus = core->bus;
	unsigned int mips_irq;

	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		return bus->host_pci->irq;
	case BCMA_HOSTTYPE_SOC:
		if (bus->drv_mips.core && num == 0) {
			mips_irq = bcma_core_mips_irq(core);
			/* MIPS IRQs 0..4 are offset by 2; others map to 0 */
			return mips_irq <= 4 ? mips_irq + 2 : 0;
		}
		if (bus->host_pdev)
			return bcma_of_get_irq(bus->host_pdev, core, num);
		return 0;
	case BCMA_HOSTTYPE_SDIO:
		return 0;
	}

	return 0;
}
EXPORT_SYMBOL(bcma_core_irq);

/*
 * Initialize the struct device embedded in a core before registration:
 * release callback, bus type, device name ("bcma<bus>:<core index>"),
 * and host-specific parent / DMA device / IRQ configuration.
 */
void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
{
	core->dev.release = bcma_release_core_dev;
	core->dev.bus = &bcma_bus_type;
	dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);

	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		core->dev.parent = &bus->host_pci->dev;
		core->dma_dev = &bus->host_pci->dev;
		core->irq = bus->host_pci->irq;
		break;
	case BCMA_HOSTTYPE_SOC:
		core->dev.dma_mask = &core->dev.coherent_dma_mask;
		if (bus->host_pdev) {
			core->dma_dev = &bus->host_pdev->dev;
			core->dev.parent = &bus->host_pdev->dev;
			/* pull of_node and IRQ from the device tree */
			bcma_of_fill_device(bus->host_pdev, core);
		} else {
			core->dma_dev = &core->dev;
		}
		break;
	case BCMA_HOSTTYPE_SDIO:
		/* no parent/DMA setup for SDIO-hosted cores */
		break;
	}
}

265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286
struct device *bcma_bus_get_host_dev(struct bcma_bus *bus)
{
	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		if (bus->host_pci)
			return &bus->host_pci->dev;
		else
			return NULL;
	case BCMA_HOSTTYPE_SOC:
		if (bus->host_pdev)
			return &bus->host_pdev->dev;
		else
			return NULL;
	case BCMA_HOSTTYPE_SDIO:
		if (bus->host_sdio)
			return &bus->host_sdio->dev;
		else
			return NULL;
	}
	return NULL;
}

/*
 * Basic bus setup: assign a unique bus number (protected by
 * bcma_buses_mutex), prepare the (empty) core list, then detect the
 * chip.
 */
void bcma_init_bus(struct bcma_bus *bus)
{
	mutex_lock(&bcma_buses_mutex);
	bus->num = bcma_bus_next_num++;
	mutex_unlock(&bcma_buses_mutex);

	INIT_LIST_HEAD(&bus->cores);
	bus->nr_cores = 0;

	bcma_detect_chip(bus);
}

/*
 * Register one core with the driver model; failure is only logged.
 *
 * NOTE(review): on failure, put_device() drops the last reference and
 * bcma_release_core_dev() frees the core while it is still linked into
 * bus->cores; the later list walk + kfree in bcma_unregister_cores()
 * would then touch freed memory — confirm against the driver core's
 * refcounting before relying on this path.
 */
static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
{
	int err;

	err = device_register(&core->dev);
	if (err) {
		bcma_err(bus, "Could not register dev for core 0x%03X\n",
			 core->id.id);
		put_device(&core->dev);
		return;
	}
	core->dev_registered = true;
}

/*
 * Register all remaining cores and auxiliary platform devices (flash,
 * GPIO, watchdog). Cores driven internally by bcma, early-registered
 * flash cores, and secondary BCM4706 GMAC cores are skipped.
 * Always returns 0; sub-device registration failures are only logged.
 */
static int bcma_register_devices(struct bcma_bus *bus)
{
	struct bcma_device *core;
	int err;

	list_for_each_entry(core, &bus->cores, list) {
		/* We support that cores ourself */
		switch (core->id.id) {
		case BCMA_CORE_4706_CHIPCOMMON:
		case BCMA_CORE_CHIPCOMMON:
		case BCMA_CORE_NS_CHIPCOMMON_B:
		case BCMA_CORE_PCI:
		case BCMA_CORE_PCIE:
		case BCMA_CORE_PCIE2:
		case BCMA_CORE_MIPS_74K:
		case BCMA_CORE_4706_MAC_GBIT_COMMON:
			continue;
		}

		/* Early cores were already registered */
		if (bcma_is_core_needed_early(core->id.id))
			continue;

		/* Only first GMAC core on BCM4706 is connected and working */
		if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
		    core->core_unit > 0)
			continue;

		bcma_register_core(bus, core);
	}

#ifdef CONFIG_BCMA_PFLASH
	if (bus->drv_cc.pflash.present) {
		err = platform_device_register(&bcma_pflash_dev);
		if (err)
			bcma_err(bus, "Error registering parallel flash\n");
	}
#endif

#ifdef CONFIG_BCMA_SFLASH
	if (bus->drv_cc.sflash.present) {
		err = platform_device_register(&bcma_sflash_dev);
		if (err)
			bcma_err(bus, "Error registering serial flash\n");
	}
#endif

#ifdef CONFIG_BCMA_NFLASH
	if (bus->drv_cc.nflash.present) {
		err = platform_device_register(&bcma_nflash_dev);
		if (err)
			bcma_err(bus, "Error registering NAND flash\n");
	}
#endif

	err = bcma_gpio_init(&bus->drv_cc);
	if (err == -ENOTSUPP)
		bcma_debug(bus, "GPIO driver not activated\n");
	else if (err)
		bcma_err(bus, "Error registering GPIO driver: %i\n", err);

	if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
		err = bcma_chipco_watchdog_register(&bus->drv_cc);
		if (err)
			bcma_err(bus, "Error registering watchdog driver\n");
	}

	return 0;
}

/*
 * Unregister and free all cores of a bus. Cores registered with the
 * driver model are unregistered first (their release callback frees
 * them); the remaining internally-handled cores are then freed
 * directly.
 */
void bcma_unregister_cores(struct bcma_bus *bus)
{
	struct bcma_device *core, *tmp;

	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
		if (!core->dev_registered)
			continue;
		list_del(&core->list);
		device_unregister(&core->dev);
	}
	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
		platform_device_unregister(bus->drv_cc.watchdog);

	/* Now noone uses internally-handled cores, we can free them */
	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
		list_del(&core->list);
		kfree(core);
	}
}

/*
 * Full bus registration: scan for cores, early-init ChipCommon and
 * PCIe, populate DT children, register flash cores, read the SPROM,
 * initialize all internally-driven cores, and finally register the
 * remaining cores with the driver model.
 */
int bcma_bus_register(struct bcma_bus *bus)
{
	int err;
	struct bcma_device *core;
	struct device *dev;

	/* Scan for devices (cores) */
	err = bcma_bus_scan(bus);
	if (err) {
		bcma_err(bus, "Failed to scan: %d\n", err);
		return err;
	}

	/* Early init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_early_init(&bus->drv_cc);
	}

	/* Early init PCIE core */
	core = bcma_find_core(bus, BCMA_CORE_PCIE);
	if (core) {
		bus->drv_pci[0].core = core;
		bcma_core_pci_early_init(&bus->drv_pci[0]);
	}

	/* Populate child platform devices described in the device tree */
	dev = bcma_bus_get_host_dev(bus);
	if (dev) {
		of_platform_default_populate(dev->of_node, NULL, dev);
	}

	/* Cores providing flash access go before SPROM init */
	list_for_each_entry(core, &bus->cores, list) {
		if (bcma_is_core_needed_early(core->id.id))
			bcma_register_core(bus, core);
	}

	/* Try to get SPROM */
	err = bcma_sprom_get(bus);
	if (err == -ENOENT) {
		bcma_err(bus, "No SPROM available\n");
	} else if (err)
		bcma_err(bus, "Failed to get SPROM: %d\n", err);

	/* Init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_init(&bus->drv_cc);
	}

	/* Init NS ChipCommon B core */
	core = bcma_find_core(bus, BCMA_CORE_NS_CHIPCOMMON_B);
	if (core) {
		bus->drv_cc_b.core = core;
		bcma_core_chipcommon_b_init(&bus->drv_cc_b);
	}

	/* Init MIPS core */
	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	if (core) {
		bus->drv_mips.core = core;
		bcma_core_mips_init(&bus->drv_mips);
	}

	/* Init PCIE core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 0);
	if (core) {
		bus->drv_pci[0].core = core;
		bcma_core_pci_init(&bus->drv_pci[0]);
	}

	/* Init second PCIE core, if present */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1);
	if (core) {
		bus->drv_pci[1].core = core;
		bcma_core_pci_init(&bus->drv_pci[1]);
	}

	/* Init PCIe Gen 2 core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE2, 0);
	if (core) {
		bus->drv_pcie2.core = core;
		bcma_core_pcie2_init(&bus->drv_pcie2);
	}

	/* Init GBIT MAC COMMON core */
	core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
	if (core) {
		bus->drv_gmac_cmn.core = core;
		bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
	}

	/* Register found cores */
	bcma_register_devices(bus);

	bcma_info(bus, "Bus registered\n");

	return 0;
}

/*
 * Tear down a registered bus: unregister GPIO (failures only logged),
 * free the NS ChipCommon B mapping, then unregister and free all cores.
 */
void bcma_bus_unregister(struct bcma_bus *bus)
{
	int err;

	err = bcma_gpio_unregister(&bus->drv_cc);
	if (err == -EBUSY)
		bcma_err(bus, "Some GPIOs are still in use.\n");
	else if (err)
		bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);

	bcma_core_chipcommon_b_free(&bus->drv_cc_b);

	bcma_unregister_cores(bus);
}

/*
 * This is a special version of bus registration function designed for SoCs.
 * It scans bus and performs basic initialization of main cores only.
 * Please note it requires memory allocation, however it won't try to sleep.
 */
int __init bcma_bus_early_register(struct bcma_bus *bus)
{
	int err;
	struct bcma_device *core;

	/* Scan for devices (cores) */
	err = bcma_bus_scan(bus);
	if (err) {
		bcma_err(bus, "Failed to scan bus: %d\n", err);
		return -1;
	}

	/* Early init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_early_init(&bus->drv_cc);
	}

	/* Early init MIPS core */
	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	if (core) {
		bus->drv_mips.core = core;
		bcma_core_mips_early_init(&bus->drv_mips);
	}

	bcma_info(bus, "Early bus registered\n");

	return 0;
}

#ifdef CONFIG_PM
/*
 * Forward the suspend request to every core that has a bound bcma
 * driver implementing a suspend callback. Always returns 0.
 */
int bcma_bus_suspend(struct bcma_bus *bus)
{
	struct bcma_device *core;

	list_for_each_entry(core, &bus->cores, list) {
		struct device_driver *drv = core->dev.driver;
		if (drv) {
			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
			if (adrv->suspend)
				adrv->suspend(core);
		}
	}
	return 0;
}

/*
 * Re-initialize the ChipCommon core after suspend, then forward the
 * resume request to every bound bcma driver. Always returns 0.
 */
int bcma_bus_resume(struct bcma_bus *bus)
{
	struct bcma_device *core;

	/* Init CC core */
	if (bus->drv_cc.core) {
		/* force a fresh setup pass after power loss */
		bus->drv_cc.setup_done = false;
		bcma_core_chipcommon_init(&bus->drv_cc);
	}

	list_for_each_entry(core, &bus->cores, list) {
		struct device_driver *drv = core->dev.driver;
		if (drv) {
			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
			if (adrv->resume)
				adrv->resume(core);
		}
	}

	return 0;
}
#endif

/*
 * Register a bcma driver with the driver core.
 * @owner: the module that owns the driver.
 */
int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
{
	drv->drv.name = drv->name;
	drv->drv.bus = &bcma_bus_type;
	drv->drv.owner = owner;

	return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(__bcma_driver_register);

/* Unregister a previously registered bcma driver. */
void bcma_driver_unregister(struct bcma_driver *drv)
{
	driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(bcma_driver_unregister);

static int bcma_bus_match(struct device *dev, struct device_driver *drv)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
	const struct bcma_device_id *cid = &core->id;
	const struct bcma_device_id *did;

	for (did = adrv->id_table; did->manuf || did->id || did->rev; did++) {
	    if ((did->manuf == cid->manuf || did->manuf == BCMA_ANY_MANUF) &&
		(did->id == cid->id || did->id == BCMA_ANY_ID) &&
		(did->rev == cid->rev || did->rev == BCMA_ANY_REV) &&
		(did->class == cid->class || did->class == BCMA_ANY_CLASS))
			return 1;
	}
	return 0;
}

/* Bus probe callback: delegate to the bcma driver's probe, if any. */
static int bcma_device_probe(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
					       drv);

	if (adrv->probe)
		return adrv->probe(core);

	return 0;
}

/* Bus remove callback: delegate to the bcma driver's remove, if any. */
static int bcma_device_remove(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
					       drv);

	if (adrv->remove)
		adrv->remove(core);

	return 0;
}

/* Emit a MODALIAS uevent so userspace can autoload the matching driver. */
static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	return add_uevent_var(env,
			      "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
			      core->id.manuf, core->id.id,
			      core->id.rev, core->id.class);
}

/* guards against registering the bus type twice (initcall + modinit) */
static unsigned int bcma_bus_registered;

/*
 * If built-in, bus has to be registered early, before any driver calls
 * bcma_driver_register.
 * Otherwise registering driver would trigger BUG in driver_register.
 */
static int __init bcma_init_bus_register(void)
{
	int err;

	if (bcma_bus_registered)
		return 0;

	err = bus_register(&bcma_bus_type);
	if (!err)
		bcma_bus_registered = 1;

	return err;
}
#ifndef MODULE
fs_initcall(bcma_init_bus_register);
#endif

/* Main initialization has to be done with SPI/mtd/NAND/SPROM available */
static int __init bcma_modinit(void)
{
	int err;

	err = bcma_init_bus_register();
	if (err)
		return err;

	/* Host driver failures are logged but do not fail module init */
	err = bcma_host_soc_register_driver();
	if (err) {
		pr_err("SoC host initialization failed\n");
		err = 0;
	}
#ifdef CONFIG_BCMA_HOST_PCI
	err = bcma_host_pci_init();
	if (err) {
		pr_err("PCI host initialization failed\n");
		err = 0;
	}
#endif

	return err;
}
module_init(bcma_modinit);

/* Module exit: tear down host drivers, then unregister the bus type. */
static void __exit bcma_modexit(void)
{
#ifdef CONFIG_BCMA_HOST_PCI
	bcma_host_pci_exit();
#endif
	bcma_host_soc_unregister_driver();
	bus_unregister(&bcma_bus_type);
}
module_exit(bcma_modexit)