/*
 * Device probing and sysfs code.
 *
 * Copyright (C) 2005-2006  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/bug.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/workqueue.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>
#include <asm/system.h>

#include "core.h"

void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p)
{
	ci->p = p + 1;
	ci->end = ci->p + (p[0] >> 16);
}
EXPORT_SYMBOL(fw_csr_iterator_init);

int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
{
	*key = *ci->p >> 24;
	*value = *ci->p & 0xffffff;

	return ci->p++ < ci->end;
}
EXPORT_SYMBOL(fw_csr_iterator_next);
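
/*
 * Usage sketch (illustrative, not taken from a particular caller): code
 * that already holds a directory pointer can walk its entries like this;
 * "model" is a hypothetical local variable.
 *
 *	struct fw_csr_iterator ci;
 *	int key, value, model = 0;
 *
 *	fw_csr_iterator_init(&ci, unit->directory);
 *	while (fw_csr_iterator_next(&ci, &key, &value))
 *		if (key == CSR_MODEL)
 *			model = value;
 */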

static const u32 *search_leaf(const u32 *directory, int search_key)
{
	struct fw_csr_iterator ci;
	int last_key = 0, key, value;

	fw_csr_iterator_init(&ci, directory);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		if (last_key == search_key &&
		    key == (CSR_DESCRIPTOR | CSR_LEAF))
			return ci.p - 1 + value;

		last_key = key;
	}

	return NULL;
}

static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
{
	unsigned int quadlets, i;
	char c;

	if (!size || !buf)
		return -EINVAL;

	quadlets = min(block[0] >> 16, 256U);
	if (quadlets < 2)
		return -ENODATA;

	if (block[1] != 0 || block[2] != 0)
		/* unknown language/character set */
		return -ENODATA;

	block += 3;
	quadlets -= 2;
	for (i = 0; i < quadlets * 4 && i < size - 1; i++) {
		c = block[i / 4] >> (24 - 8 * (i % 4));
		if (c == '\0')
			break;
		buf[i] = c;
	}
	buf[i] = '\0';

	return i;
}

/**
 * fw_csr_string - reads a string from the configuration ROM
 * @directory: e.g. root directory or unit directory
 * @key: the key of the preceding directory entry
 * @buf: where to put the string
 * @size: size of @buf, in bytes
 *
 * The string is taken from a minimal ASCII text descriptor leaf after
 * the immediate entry with @key.  The string is zero-terminated.
 * Returns strlen(buf) or a negative error code.
 */
int fw_csr_string(const u32 *directory, int key, char *buf, size_t size)
{
	const u32 *leaf = search_leaf(directory, key);
	if (!leaf)
		return -ENOENT;

	return textual_leaf_to_string(leaf, buf, size);
}
EXPORT_SYMBOL(fw_csr_string);
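
/*
 * Usage sketch (illustrative): read a unit's model name into a fixed
 * buffer; the buffer size is an arbitrary choice for this example.
 *
 *	char name[32];
 *
 *	if (fw_csr_string(unit->directory, CSR_MODEL, name, sizeof(name)) < 0)
 *		name[0] = '\0';
 */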

static bool is_fw_unit(struct device *dev);

static int match_unit_directory(const u32 *directory, u32 match_flags,
				const struct ieee1394_device_id *id)
{
	struct fw_csr_iterator ci;
	int key, value, match;

	match = 0;
	fw_csr_iterator_init(&ci, directory);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		if (key == CSR_VENDOR && value == id->vendor_id)
			match |= IEEE1394_MATCH_VENDOR_ID;
		if (key == CSR_MODEL && value == id->model_id)
			match |= IEEE1394_MATCH_MODEL_ID;
		if (key == CSR_SPECIFIER_ID && value == id->specifier_id)
			match |= IEEE1394_MATCH_SPECIFIER_ID;
		if (key == CSR_VERSION && value == id->version)
			match |= IEEE1394_MATCH_VERSION;
	}

	return (match & match_flags) == match_flags;
}

static int fw_unit_match(struct device *dev, struct device_driver *drv)
{
	struct fw_unit *unit = fw_unit(dev);
	struct fw_device *device;
	const struct ieee1394_device_id *id;

	/* We only allow binding to fw_units. */
	if (!is_fw_unit(dev))
		return 0;

	device = fw_parent_device(unit);
	id = container_of(drv, struct fw_driver, driver)->id_table;

	for (; id->match_flags != 0; id++) {
		if (match_unit_directory(unit->directory, id->match_flags, id))
			return 1;

		/* Also check vendor ID in the root directory. */
		if ((id->match_flags & IEEE1394_MATCH_VENDOR_ID) &&
		    match_unit_directory(&device->config_rom[5],
				IEEE1394_MATCH_VENDOR_ID, id) &&
		    match_unit_directory(unit->directory, id->match_flags
				& ~IEEE1394_MATCH_VENDOR_ID, id))
177 178 179 180 181 182 183 184
			return 1;
	}

	return 0;
}
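
/*
 * Sketch of an id_table as a unit driver might declare it; the specifier
 * ID and version values below are hypothetical placeholders.
 *
 *	static const struct ieee1394_device_id example_id_table[] = {
 *		{
 *			.match_flags  = IEEE1394_MATCH_SPECIFIER_ID |
 *					IEEE1394_MATCH_VERSION,
 *			.specifier_id = 0x00abcd,
 *			.version      = 0x010001,
 *		},
 *		{ }
 *	};
 */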

static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
{
	struct fw_device *device = fw_parent_device(unit);
	struct fw_csr_iterator ci;

	int key, value;
	int vendor = 0;
	int model = 0;
	int specifier_id = 0;
	int version = 0;

	fw_csr_iterator_init(&ci, &device->config_rom[5]);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		switch (key) {
		case CSR_VENDOR:
			vendor = value;
			break;
		case CSR_MODEL:
			model = value;
			break;
		}
	}

	fw_csr_iterator_init(&ci, unit->directory);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		switch (key) {
		case CSR_SPECIFIER_ID:
			specifier_id = value;
			break;
		case CSR_VERSION:
			version = value;
			break;
		}
	}

	return snprintf(buffer, buffer_size,
			"ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
			vendor, model, specifier_id, version);
}
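
/*
 * For illustration, hypothetical values vendor 0x00abcd, model 0x001234,
 * specifier ID 0x00a02d and version 0x010001 would produce
 * "ieee1394:ven0000ABCDmo00001234sp0000A02Dver00010001".
 */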

static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct fw_unit *unit = fw_unit(dev);
	char modalias[64];

	get_modalias(unit, modalias, sizeof(modalias));

	if (add_uevent_var(env, "MODALIAS=%s", modalias))
		return -ENOMEM;

	return 0;
}

struct bus_type fw_bus_type = {
	.name = "firewire",
	.match = fw_unit_match,
};
EXPORT_SYMBOL(fw_bus_type);

int fw_device_enable_phys_dma(struct fw_device *device)
{
	int generation = device->generation;

	/* device->node_id, accessed below, must not be older than generation */
	smp_rmb();

	return device->card->driver->enable_phys_dma(device->card,
						     device->node_id,
						     generation);
}
EXPORT_SYMBOL(fw_device_enable_phys_dma);

struct config_rom_attribute {
	struct device_attribute attr;
	u32 key;
};

static ssize_t show_immediate(struct device *dev,
			      struct device_attribute *dattr, char *buf)
{
	struct config_rom_attribute *attr =
		container_of(dattr, struct config_rom_attribute, attr);
	struct fw_csr_iterator ci;
	const u32 *dir;
	int key, value, ret = -ENOENT;

	down_read(&fw_device_rwsem);

	if (is_fw_unit(dev))
		dir = fw_unit(dev)->directory;
	else
		dir = fw_device(dev)->config_rom + 5;

	fw_csr_iterator_init(&ci, dir);
	while (fw_csr_iterator_next(&ci, &key, &value))
		if (attr->key == key) {
			ret = snprintf(buf, buf ? PAGE_SIZE : 0,
				       "0x%06x\n", value);
			break;
		}

	up_read(&fw_device_rwsem);

	return ret;
}

#define IMMEDIATE_ATTR(name, key)				\
	{ __ATTR(name, S_IRUGO, show_immediate, NULL), key }

static ssize_t show_text_leaf(struct device *dev,
			      struct device_attribute *dattr, char *buf)
{
	struct config_rom_attribute *attr =
		container_of(dattr, struct config_rom_attribute, attr);
	const u32 *dir;
	size_t bufsize;
	char dummy_buf[2];
	int ret;

	down_read(&fw_device_rwsem);

	if (is_fw_unit(dev))
		dir = fw_unit(dev)->directory;
	else
		dir = fw_device(dev)->config_rom + 5;

	if (buf) {
		bufsize = PAGE_SIZE - 1;
	} else {
		buf = dummy_buf;
		bufsize = 1;
	}

	ret = fw_csr_string(dir, attr->key, buf, bufsize);

	if (ret >= 0) {
		/* Strip trailing whitespace and add newline. */
		while (ret > 0 && isspace(buf[ret - 1]))
			ret--;
		strcpy(buf + ret, "\n");
		ret++;
	}

	up_read(&fw_device_rwsem);

	return ret;
}

#define TEXT_LEAF_ATTR(name, key)				\
	{ __ATTR(name, S_IRUGO, show_text_leaf, NULL), key }

static struct config_rom_attribute config_rom_attributes[] = {
	IMMEDIATE_ATTR(vendor, CSR_VENDOR),
	IMMEDIATE_ATTR(hardware_version, CSR_HARDWARE_VERSION),
	IMMEDIATE_ATTR(specifier_id, CSR_SPECIFIER_ID),
	IMMEDIATE_ATTR(version, CSR_VERSION),
	IMMEDIATE_ATTR(model, CSR_MODEL),
	TEXT_LEAF_ATTR(vendor_name, CSR_VENDOR),
	TEXT_LEAF_ATTR(model_name, CSR_MODEL),
	TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
};
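
/*
 * These appear as read-only sysfs attributes of both devices and units,
 * e.g. /sys/bus/firewire/devices/fw1/vendor_name (the device index in the
 * path is illustrative and depends on probe order).
 */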

static void init_fw_attribute_group(struct device *dev,
				    struct device_attribute *attrs,
				    struct fw_attribute_group *group)
{
	struct device_attribute *attr;
	int i, j;

	for (j = 0; attrs[j].attr.name != NULL; j++)
		group->attrs[j] = &attrs[j].attr;

	for (i = 0; i < ARRAY_SIZE(config_rom_attributes); i++) {
		attr = &config_rom_attributes[i].attr;
		if (attr->show(dev, attr, NULL) < 0)
			continue;
		group->attrs[j++] = &attr->attr;
	}

	group->attrs[j] = NULL;
	group->groups[0] = &group->group;
	group->groups[1] = NULL;
	group->group.attrs = group->attrs;
	dev->groups = (const struct attribute_group **) group->groups;
}

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct fw_unit *unit = fw_unit(dev);
	int length;

	length = get_modalias(unit, buf, PAGE_SIZE);
	strcpy(buf + length, "\n");

	return length + 1;
}

static ssize_t rom_index_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct fw_device *device = fw_device(dev->parent);
	struct fw_unit *unit = fw_unit(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			(int)(unit->directory - device->config_rom));
}

static struct device_attribute fw_unit_attributes[] = {
	__ATTR_RO(modalias),
	__ATTR_RO(rom_index),
	__ATTR_NULL,
};

static ssize_t config_rom_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct fw_device *device = fw_device(dev);
	size_t length;

	down_read(&fw_device_rwsem);
	length = device->config_rom_length * 4;
	memcpy(buf, device->config_rom, length);
	up_read(&fw_device_rwsem);

	return length;
}

static ssize_t guid_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct fw_device *device = fw_device(dev);
	int ret;

	down_read(&fw_device_rwsem);
	ret = snprintf(buf, PAGE_SIZE, "0x%08x%08x\n",
		       device->config_rom[3], device->config_rom[4]);
	up_read(&fw_device_rwsem);

	return ret;
}

static int units_sprintf(char *buf, const u32 *directory)
{
	struct fw_csr_iterator ci;
	int key, value;
	int specifier_id = 0;
	int version = 0;

	fw_csr_iterator_init(&ci, directory);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		switch (key) {
		case CSR_SPECIFIER_ID:
			specifier_id = value;
			break;
		case CSR_VERSION:
			version = value;
			break;
		}
	}

	return sprintf(buf, "0x%06x:0x%06x ", specifier_id, version);
}

static ssize_t units_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct fw_device *device = fw_device(dev);
	struct fw_csr_iterator ci;
	int key, value, i = 0;

	down_read(&fw_device_rwsem);
	fw_csr_iterator_init(&ci, &device->config_rom[5]);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		if (key != (CSR_UNIT | CSR_DIRECTORY))
			continue;
		i += units_sprintf(&buf[i], ci.p + value - 1);
		if (i >= PAGE_SIZE - (8 + 1 + 8 + 1))
			break;
	}
	up_read(&fw_device_rwsem);

	if (i)
		buf[i - 1] = '\n';

	return i;
}
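
/*
 * Example output of the "units" attribute for a device with two unit
 * directories (hypothetical IDs): "0x00a02d:0x010001 0x00609e:0x010483";
 * one specifier_id:version pair per unit, terminated by a newline.
 */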

static struct device_attribute fw_device_attributes[] = {
	__ATTR_RO(config_rom),
	__ATTR_RO(guid),
	__ATTR_RO(units),
	__ATTR_NULL,
};

static int read_rom(struct fw_device *device,
		    int generation, int index, u32 *data)
{
	int rcode;

	/* device->node_id, accessed below, must not be older than generation */
	smp_rmb();

	rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
			device->node_id, generation, device->max_speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4,
			data, 4);
	be32_to_cpus(data);

	return rcode;
}

#define MAX_CONFIG_ROM_SIZE 256

/*
 * Read the bus info block, perform a speed probe, and read all of the rest of
 * the config ROM.  We do all this with a cached bus generation.  If the bus
 * generation changes under us, read_config_rom will fail and get retried.
 * It's better to start all over in this case because the node from which we
 * are reading the ROM may have changed the ROM during the reset.
 */
static int read_config_rom(struct fw_device *device, int generation)
{
	const u32 *old_rom, *new_rom;
	u32 *rom, *stack;
	u32 sp, key;
	int i, end, length, ret = -1;

	rom = kmalloc(sizeof(*rom) * MAX_CONFIG_ROM_SIZE +
		      sizeof(*stack) * MAX_CONFIG_ROM_SIZE, GFP_KERNEL);
	if (rom == NULL)
		return -ENOMEM;

	stack = &rom[MAX_CONFIG_ROM_SIZE];
	memset(rom, 0, sizeof(*rom) * MAX_CONFIG_ROM_SIZE);

	device->max_speed = SCODE_100;

	/* First read the bus info block. */
	for (i = 0; i < 5; i++) {
		if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
			goto out;
		/*
		 * As per IEEE1212 7.2, during power-up, devices can
		 * reply with a 0 for the first quadlet of the config
		 * rom to indicate that they are booting (for example,
		 * if the firmware is on the disk of an external
		 * hard disk).  In that case we just fail, and the
		 * retry mechanism will try again later.
		 */
		if (i == 0 && rom[i] == 0)
			goto out;
	}

	device->max_speed = device->node->max_speed;

	/*
	 * Determine the speed of
	 *   - devices with link speed less than PHY speed,
	 *   - devices with 1394b PHY (unless only connected to 1394a PHYs),
	 *   - all devices if there are 1394b repeaters.
	 * Note, we cannot use the bus info block's link_spd as starting point
	 * because some buggy firmwares set it lower than necessary and because
	 * 1394-1995 nodes do not have the field.
	 */
	if ((rom[2] & 0x7) < device->max_speed ||
	    device->max_speed == SCODE_BETA ||
	    device->card->beta_repeaters_present) {
		u32 dummy;

		/* for S1600 and S3200 */
		if (device->max_speed == SCODE_BETA)
			device->max_speed = device->card->link_speed;

		while (device->max_speed > SCODE_100) {
			if (read_rom(device, generation, 0, &dummy) ==
			    RCODE_COMPLETE)
				break;
			device->max_speed--;
		}
	}

	/*
	 * Now parse the config rom.  The config rom is a recursive
	 * directory structure so we parse it using a stack of
	 * references to the blocks that make up the structure.  We
	 * push a reference to the root directory on the stack to
	 * start things off.
	 */
	length = i;
	sp = 0;
	stack[sp++] = 0xc0000005;
	while (sp > 0) {
		/*
		 * Pop the next block reference off the stack.  The
		 * lower 24 bits is the offset into the config rom,
		 * the upper 8 bits are the type of the reference to the
		 * block.
		 */
		key = stack[--sp];
		i = key & 0xffffff;
		if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE))
			goto out;

		/* Read header quadlet for the block to get the length. */
		if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
			goto out;
		end = i + (rom[i] >> 16) + 1;
		if (end > MAX_CONFIG_ROM_SIZE) {
			/*
			 * This block extends outside the config ROM which is
			 * a firmware bug.  Ignore this whole block, i.e.
			 * simply set a fake block length of 0.
			 */
			fw_error("skipped invalid ROM block %x at %llx\n",
				 rom[i],
				 i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
			rom[i] = 0;
			end = i;
		}
		i++;

		/*
		 * Now read in the block.  If this is a directory
		 * block, check the entries as we read them to see if
		 * it references another block, and push it in that case.
		 */
		for (; i < end; i++) {
			if (read_rom(device, generation, i, &rom[i]) !=
			    RCODE_COMPLETE)
				goto out;

			if ((key >> 30) != 3 || (rom[i] >> 30) < 2)
				continue;
			/*
			 * Offset points outside the ROM.  May be a firmware
			 * bug or an Extended ROM entry (IEEE 1212-2001 clause
			 * 7.7.18).  Simply overwrite this pointer here by a
			 * fake immediate entry so that later iterators over
			 * the ROM don't have to check offsets all the time.
			 */
			if (i + (rom[i] & 0xffffff) >= MAX_CONFIG_ROM_SIZE) {
				fw_error("skipped unsupported ROM entry %x at %llx\n",
					 rom[i],
					 i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
				rom[i] = 0;
				continue;
			}
			stack[sp++] = i + rom[i];
		}
		if (length < i)
			length = i;
	}

	old_rom = device->config_rom;
	new_rom = kmemdup(rom, length * 4, GFP_KERNEL);
	if (new_rom == NULL)
		goto out;

	down_write(&fw_device_rwsem);
	device->config_rom = new_rom;
	device->config_rom_length = length;
	up_write(&fw_device_rwsem);

	kfree(old_rom);
	ret = 0;
	device->max_rec	= rom[2] >> 12 & 0xf;
	device->cmc	= rom[2] >> 30 & 1;
	device->irmc	= rom[2] >> 31 & 1;
 out:
	kfree(rom);

	return ret;
}

static void fw_unit_release(struct device *dev)
{
	struct fw_unit *unit = fw_unit(dev);

	kfree(unit);
}

static struct device_type fw_unit_type = {
	.uevent		= fw_unit_uevent,
	.release	= fw_unit_release,
};

static bool is_fw_unit(struct device *dev)
{
	return dev->type == &fw_unit_type;
}

static void create_units(struct fw_device *device)
{
	struct fw_csr_iterator ci;
	struct fw_unit *unit;
	int key, value, i;

	i = 0;
	fw_csr_iterator_init(&ci, &device->config_rom[5]);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		if (key != (CSR_UNIT | CSR_DIRECTORY))
			continue;

		/*
		 * Get the address of the unit directory and try to
		 * match the drivers' id_tables against it.
		 */
		unit = kzalloc(sizeof(*unit), GFP_KERNEL);
		if (unit == NULL) {
			fw_error("failed to allocate memory for unit\n");
			continue;
		}

		unit->directory = ci.p + value - 1;
		unit->device.bus = &fw_bus_type;
		unit->device.type = &fw_unit_type;
		unit->device.parent = &device->device;
		dev_set_name(&unit->device, "%s.%d", dev_name(&device->device), i++);

		BUILD_BUG_ON(ARRAY_SIZE(unit->attribute_group.attrs) <
				ARRAY_SIZE(fw_unit_attributes) +
				ARRAY_SIZE(config_rom_attributes));
		init_fw_attribute_group(&unit->device,
					fw_unit_attributes,
					&unit->attribute_group);

		if (device_register(&unit->device) < 0)
			goto skip_unit;

		continue;

	skip_unit:
		kfree(unit);
	}
}

static int shutdown_unit(struct device *device, void *data)
{
	device_unregister(device);

	return 0;
}

/*
 * fw_device_rwsem acts as dual purpose mutex:
 *   - serializes accesses to fw_device_idr,
 *   - serializes accesses to fw_device.config_rom/.config_rom_length and
 *     fw_unit.directory, unless those accesses happen at safe occasions
 */
DECLARE_RWSEM(fw_device_rwsem);

DEFINE_IDR(fw_device_idr);
int fw_cdev_major;

struct fw_device *fw_device_get_by_devt(dev_t devt)
{
	struct fw_device *device;

	down_read(&fw_device_rwsem);
	device = idr_find(&fw_device_idr, MINOR(devt));
	if (device)
		fw_device_get(device);
	up_read(&fw_device_rwsem);

	return device;
}

/*
 * These defines control the retry behavior for reading the config
 * rom.  It shouldn't be necessary to tweak these; if the device
 * doesn't respond to a config rom read within 10 seconds, it's not
 * going to respond at all.  As for the initial delay, a lot of
 * devices will be able to respond within half a second after bus
 * reset.  On the other hand, it's not really worth being more
 * aggressive than that, since it scales pretty well; if 10 devices
 * are plugged in, they're all getting read within one second.
 */

#define MAX_RETRIES	10
#define RETRY_DELAY	(3 * HZ)
#define INITIAL_DELAY	(HZ / 2)
#define SHUTDOWN_DELAY	(2 * HZ)
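
/*
 * For the initial scan this works out to: a first read attempt INITIAL_DELAY
 * (half a second) after bus reset, then on failure up to MAX_RETRIES further
 * attempts spaced RETRY_DELAY (three seconds) apart.
 */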

static void fw_device_shutdown(struct work_struct *work)
{
	struct fw_device *device =
		container_of(work, struct fw_device, work.work);
	int minor = MINOR(device->device.devt);

	if (time_is_after_jiffies(device->card->reset_jiffies + SHUTDOWN_DELAY)
	    && !list_empty(&device->card->link)) {
		schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
		return;
	}

	if (atomic_cmpxchg(&device->state,
			   FW_DEVICE_GONE,
			   FW_DEVICE_SHUTDOWN) != FW_DEVICE_GONE)
		return;

	fw_device_cdev_remove(device);
	device_for_each_child(&device->device, NULL, shutdown_unit);
	device_unregister(&device->device);

	down_write(&fw_device_rwsem);
	idr_remove(&fw_device_idr, minor);
	up_write(&fw_device_rwsem);

	fw_device_put(device);
}

static void fw_device_release(struct device *dev)
{
	struct fw_device *device = fw_device(dev);
	struct fw_card *card = device->card;
	unsigned long flags;

	/*
	 * Take the card lock so we don't set this to NULL while a
	 * FW_NODE_UPDATED callback is being handled or while the
	 * bus manager work looks at this node.
	 */
	spin_lock_irqsave(&card->lock, flags);
	device->node->data = NULL;
	spin_unlock_irqrestore(&card->lock, flags);

	fw_node_put(device->node);
	kfree(device->config_rom);
	kfree(device);
	fw_card_put(card);
}

static struct device_type fw_device_type = {
	.release = fw_device_release,
};

static bool is_fw_device(struct device *dev)
{
	return dev->type == &fw_device_type;
}

static int update_unit(struct device *dev, void *data)
{
	struct fw_unit *unit = fw_unit(dev);
	struct fw_driver *driver = (struct fw_driver *)dev->driver;

	if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
		down(&dev->sem);
		driver->update(unit);
		up(&dev->sem);
	}

	return 0;
}

static void fw_device_update(struct work_struct *work)
{
	struct fw_device *device =
		container_of(work, struct fw_device, work.work);

	fw_device_cdev_update(device);
	device_for_each_child(&device->device, NULL, update_unit);
}

/*
 * If a device was pending for deletion because its node went away but its
 * bus info block and root directory header matches that of a newly discovered
 * device, revive the existing fw_device.
 * The newly allocated fw_device becomes obsolete instead.
 */
static int lookup_existing_device(struct device *dev, void *data)
{
	struct fw_device *old = fw_device(dev);
	struct fw_device *new = data;
	struct fw_card *card = new->card;
	int match = 0;

	if (!is_fw_device(dev))
		return 0;

	down_read(&fw_device_rwsem); /* serialize config_rom access */
	spin_lock_irq(&card->lock);  /* serialize node access */

	if (memcmp(old->config_rom, new->config_rom, 6 * 4) == 0 &&
	    atomic_cmpxchg(&old->state,
			   FW_DEVICE_GONE,
			   FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
		struct fw_node *current_node = new->node;
		struct fw_node *obsolete_node = old->node;

		new->node = obsolete_node;
		new->node->data = new;
		old->node = current_node;
		old->node->data = old;

		old->max_speed = new->max_speed;
		old->node_id = current_node->node_id;
		smp_wmb();  /* update node_id before generation */
		old->generation = card->generation;
		old->config_rom_retries = 0;
		fw_notify("rediscovered device %s\n", dev_name(dev));

		PREPARE_DELAYED_WORK(&old->work, fw_device_update);
		schedule_delayed_work(&old->work, 0);

		if (current_node == card->root_node)
			fw_schedule_bm_work(card, 0);

		match = 1;
	}

	spin_unlock_irq(&card->lock);
	up_read(&fw_device_rwsem);

	return match;
}

enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, };

static void set_broadcast_channel(struct fw_device *device, int generation)
{
	struct fw_card *card = device->card;
	__be32 data;
	int rcode;

	if (!card->broadcast_channel_allocated)
		return;

	/*
	 * The Broadcast_Channel Valid bit is required by nodes which want to
	 * transmit on this channel.  Such transmissions are practically
	 * exclusive to IP over 1394 (RFC 2734).  IP capable nodes are required
	 * to be IRM capable and have a max_rec of 8 or more.  We use this fact
	 * to narrow down to which nodes we send Broadcast_Channel updates.
	 */
	if (!device->irmc || device->max_rec < 8)
		return;

	/*
	 * Some 1394-1995 nodes crash if this 1394a-2000 register is written.
	 * Perform a read test first.
	 */
	if (device->bc_implemented == BC_UNKNOWN) {
		rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
				device->node_id, generation, device->max_speed,
				CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
				&data, 4);
		switch (rcode) {
		case RCODE_COMPLETE:
			if (data & cpu_to_be32(1 << 31)) {
				device->bc_implemented = BC_IMPLEMENTED;
				break;
			}
			/* else fall through to case address error */
		case RCODE_ADDRESS_ERROR:
			device->bc_implemented = BC_UNIMPLEMENTED;
		}
	}

	if (device->bc_implemented == BC_IMPLEMENTED) {
		data = cpu_to_be32(BROADCAST_CHANNEL_INITIAL |
				   BROADCAST_CHANNEL_VALID);
		fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
				device->node_id, generation, device->max_speed,
				CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
				&data, 4);
	}
}

int fw_device_set_broadcast_channel(struct device *dev, void *gen)
{
	if (is_fw_device(dev))
		set_broadcast_channel(fw_device(dev), (long)gen);

	return 0;
}

static void fw_device_init(struct work_struct *work)
{
	struct fw_device *device =
		container_of(work, struct fw_device, work.work);
	struct device *revived_dev;
	int minor, ret;

	/*
	 * All failure paths here set node->data to NULL, so that we
	 * don't try to do device_for_each_child() on a kfree()'d
	 * device.
	 */

	if (read_config_rom(device, device->generation) < 0) {
		if (device->config_rom_retries < MAX_RETRIES &&
		    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
			device->config_rom_retries++;
			schedule_delayed_work(&device->work, RETRY_DELAY);
		} else {
			fw_notify("giving up on config rom for node id %x\n",
				  device->node_id);
			if (device->node == device->card->root_node)
				fw_schedule_bm_work(device->card, 0);
			fw_device_release(&device->device);
		}
		return;
	}

	revived_dev = device_find_child(device->card->device,
					device, lookup_existing_device);
	if (revived_dev) {
		put_device(revived_dev);
		fw_device_release(&device->device);

		return;
	}

	device_initialize(&device->device);

	fw_device_get(device);
	down_write(&fw_device_rwsem);
	ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
	      idr_get_new(&fw_device_idr, device, &minor) :
	      -ENOMEM;
	up_write(&fw_device_rwsem);

	if (ret < 0)
		goto error;

	device->device.bus = &fw_bus_type;
	device->device.type = &fw_device_type;
	device->device.parent = device->card->device;
	device->device.devt = MKDEV(fw_cdev_major, minor);
	dev_set_name(&device->device, "fw%d", minor);

	BUILD_BUG_ON(ARRAY_SIZE(device->attribute_group.attrs) <
			ARRAY_SIZE(fw_device_attributes) +
			ARRAY_SIZE(config_rom_attributes));
	init_fw_attribute_group(&device->device,
				fw_device_attributes,
				&device->attribute_group);

	if (device_add(&device->device)) {
		fw_error("Failed to add device.\n");
		goto error_with_cdev;
	}

	create_units(device);

	/*
	 * Transition the device to running state.  If it got pulled
	 * out from under us while we did the initialization work, we
	 * have to shut down the device again here.  Normally, though,
	 * fw_node_event will be responsible for shutting it down when
	 * necessary.  We have to use the atomic cmpxchg here to avoid
	 * racing with the FW_NODE_DESTROYED case in
	 * fw_node_event().
	 */
	if (atomic_cmpxchg(&device->state,
			   FW_DEVICE_INITIALIZING,
			   FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
		PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
		schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
	} else {
		if (device->config_rom_retries)
			fw_notify("created device %s: GUID %08x%08x, S%d00, "
				  "%d config ROM retries\n",
				  dev_name(&device->device),
				  device->config_rom[3], device->config_rom[4],
				  1 << device->max_speed,
				  device->config_rom_retries);
		else
			fw_notify("created device %s: GUID %08x%08x, S%d00\n",
				  dev_name(&device->device),
				  device->config_rom[3], device->config_rom[4],
				  1 << device->max_speed);
		device->config_rom_retries = 0;

		set_broadcast_channel(device, device->generation);
	}

	/*
	 * Reschedule the IRM work if we just finished reading the
	 * root node config rom.  If this races with a bus reset we
	 * just end up running the IRM work a couple of extra times -
	 * pretty harmless.
	 */
	if (device->node == device->card->root_node)
		fw_schedule_bm_work(device->card, 0);

	return;

 error_with_cdev:
	down_write(&fw_device_rwsem);
	idr_remove(&fw_device_idr, minor);
	up_write(&fw_device_rwsem);
 error:
	fw_device_put(device);		/* fw_device_idr's reference */

	put_device(&device->device);	/* our reference */
}

enum {
	REREAD_BIB_ERROR,
	REREAD_BIB_GONE,
	REREAD_BIB_UNCHANGED,
	REREAD_BIB_CHANGED,
};

/* Reread and compare bus info block and header of root directory */
static int reread_config_rom(struct fw_device *device, int generation)
{
	u32 q;
	int i;

	for (i = 0; i < 6; i++) {
		if (read_rom(device, generation, i, &q) != RCODE_COMPLETE)
			return REREAD_BIB_ERROR;

		if (i == 0 && q == 0)
			return REREAD_BIB_GONE;

		if (q != device->config_rom[i])
			return REREAD_BIB_CHANGED;
	}

	return REREAD_BIB_UNCHANGED;
}

static void fw_device_refresh(struct work_struct *work)
{
	struct fw_device *device =
		container_of(work, struct fw_device, work.work);
	struct fw_card *card = device->card;
	int node_id = device->node_id;

	switch (reread_config_rom(device, device->generation)) {
	case REREAD_BIB_ERROR:
		if (device->config_rom_retries < MAX_RETRIES / 2 &&
		    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
			device->config_rom_retries++;
			schedule_delayed_work(&device->work, RETRY_DELAY / 2);

			return;
		}
		goto give_up;

	case REREAD_BIB_GONE:
		goto gone;

	case REREAD_BIB_UNCHANGED:
		if (atomic_cmpxchg(&device->state,
				   FW_DEVICE_INITIALIZING,
				   FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
			goto gone;

		fw_device_update(work);
		device->config_rom_retries = 0;
		goto out;

	case REREAD_BIB_CHANGED:
		break;
	}

	/*
	 * Something changed.  We keep things simple and don't investigate
	 * further.  We just destroy all previous units and create new ones.
	 */
	device_for_each_child(&device->device, NULL, shutdown_unit);

	if (read_config_rom(device, device->generation) < 0) {
		if (device->config_rom_retries < MAX_RETRIES &&
		    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
			device->config_rom_retries++;
			schedule_delayed_work(&device->work, RETRY_DELAY);

			return;
		}
		goto give_up;
	}

	create_units(device);

	/* Userspace may want to re-read attributes. */
	kobject_uevent(&device->device.kobj, KOBJ_CHANGE);

	if (atomic_cmpxchg(&device->state,
			   FW_DEVICE_INITIALIZING,
			   FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
		goto gone;

	fw_notify("refreshed device %s\n", dev_name(&device->device));
	device->config_rom_retries = 0;
	goto out;

 give_up:
	fw_notify("giving up on refresh of device %s\n", dev_name(&device->device));
 gone:
	atomic_set(&device->state, FW_DEVICE_GONE);
	PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
	schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
 out:
	if (node_id == card->root_node->node_id)
		fw_schedule_bm_work(card, 0);
}

void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
{
	struct fw_device *device;

	switch (event) {
	case FW_NODE_CREATED:
	case FW_NODE_LINK_ON:
		if (!node->link_on)
			break;
 create:
		device = kzalloc(sizeof(*device), GFP_ATOMIC);
		if (device == NULL)
			break;

		/*
		 * Do minimal initialization of the device here, the
		 * rest will happen in fw_device_init().
		 *
		 * Attention:  A lot of things, even fw_device_get(),
		 * cannot be done before fw_device_init() finished!
		 * You can basically just check device->state and
		 * schedule work until then, but only while holding
		 * card->lock.
		 */
		atomic_set(&device->state, FW_DEVICE_INITIALIZING);
		device->card = fw_card_get(card);
		device->node = fw_node_get(node);
		device->node_id = node->node_id;
		device->generation = card->generation;
		device->is_local = node == card->local_node;
		mutex_init(&device->client_list_mutex);
		INIT_LIST_HEAD(&device->client_list);

		/*
		 * Set the node data to point back to this device so
		 * FW_NODE_UPDATED callbacks can update the node_id
		 * and generation for the device.
		 */
		node->data = device;

		/*
		 * Many devices are slow to respond after bus resets,
		 * especially if they are bus powered and go through
		 * power-up after getting plugged in.  We schedule the
		 * first config rom scan half a second after bus reset.
		 */
		INIT_DELAYED_WORK(&device->work, fw_device_init);
		schedule_delayed_work(&device->work, INITIAL_DELAY);
		break;

	case FW_NODE_INITIATED_RESET:
		device = node->data;
		if (device == NULL)
			goto create;

		device->node_id = node->node_id;
		smp_wmb();  /* update node_id before generation */
		device->generation = card->generation;
		if (atomic_cmpxchg(&device->state,
			    FW_DEVICE_RUNNING,
			    FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
			PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
			schedule_delayed_work(&device->work,
				device->is_local ? 0 : INITIAL_DELAY);
		}
		break;

	case FW_NODE_UPDATED:
		if (!node->link_on || node->data == NULL)
			break;

		device = node->data;
		device->node_id = node->node_id;
		smp_wmb();  /* update node_id before generation */
		device->generation = card->generation;
		if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
			PREPARE_DELAYED_WORK(&device->work, fw_device_update);
			schedule_delayed_work(&device->work, 0);
		}
		break;

	case FW_NODE_DESTROYED:
	case FW_NODE_LINK_OFF:
		if (!node->data)
			break;

		/*
		 * Destroy the device associated with the node.  There
		 * are two cases here: either the device is fully
		 * initialized (FW_DEVICE_RUNNING) or we're in the
		 * process of reading its config rom
		 * (FW_DEVICE_INITIALIZING).  If it is fully
		 * initialized we can reuse device->work to schedule a
		 * full fw_device_shutdown().  If not, there's work
		 * scheduled to read its config rom, and we just put
		 * the device in shutdown state to have that code fail
		 * to create the device.
		 */
		device = node->data;
		if (atomic_xchg(&device->state,
				FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
			PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
			schedule_delayed_work(&device->work,
				list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
		}
		break;
	}
}