/* mdesc.c: Sun4V machine description handling.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/bootmem.h>
#include <linux/export.h>

#include <asm/cpudata.h>
#include <asm/hypervisor.h>
#include <asm/mdesc.h>
#include <asm/prom.h>
#include <linux/uaccess.h>
#include <asm/oplib.h>
#include <asm/smp.h>

/* Unlike the OBP device tree, the machine description is a full-on
 * DAG.  An arbitrary number of ARCs are possible from one
 * node to other nodes and thus we can't use the OBP device_node
 * data structure to represent these nodes inside of the kernel.
 *
 * Actually, it isn't even a DAG, because there are back pointers
 * which create cycles in the graph.
 *
 * mdesc_hdr and mdesc_elem describe the layout of the data structure
 * we get from the Hypervisor.
 */
struct mdesc_hdr {
	u32	version; /* Transport version */
	u32	node_sz; /* node block size */
	u32	name_sz; /* name block size */
	u32	data_sz; /* data block size */
} __attribute__((aligned(16)));

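/* Each mdesc_elem is 16 bytes.  For MD_NODE elements d.val holds the index
 * of the next node; for MD_PROP_ARC it holds the target node index; string
 * and data properties reference the data block via d.data.
 */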
struct mdesc_elem {
	u8	tag;
#define MD_LIST_END	0x00
#define MD_NODE		0x4e
#define MD_NODE_END	0x45
#define MD_NOOP		0x20
#define MD_PROP_ARC	0x61
#define MD_PROP_VAL	0x76
#define MD_PROP_STR	0x73
#define MD_PROP_DATA	0x64
	u8	name_len;
	u16	resv;
	u32	name_offset;
	union {
		struct {
			u32	data_len;
			u32	data_offset;
		} data;
		u64	val;
	} d;
};

struct mdesc_mem_ops {
	struct mdesc_handle *(*alloc)(unsigned int mdesc_size);
	void (*free)(struct mdesc_handle *handle);
};
struct mdesc_handle {
	struct list_head	list;
	struct mdesc_mem_ops	*mops;
	void			*self_base;
	atomic_t		refcnt;
	unsigned int		handle_size;
	struct mdesc_hdr	mdesc;
};
static void mdesc_handle_init(struct mdesc_handle *hp,
			      unsigned int handle_size,
			      void *base)
{
	BUG_ON(((unsigned long)&hp->mdesc) & (16UL - 1));

	memset(hp, 0, handle_size);
	INIT_LIST_HEAD(&hp->list);
	hp->self_base = base;
	atomic_set(&hp->refcnt, 1);
	hp->handle_size = handle_size;
}

static struct mdesc_handle * __init mdesc_memblock_alloc(unsigned int mdesc_size)
{
	unsigned int handle_size, alloc_size;
	struct mdesc_handle *hp;
	unsigned long paddr;

	handle_size = (sizeof(struct mdesc_handle) -
		       sizeof(struct mdesc_hdr) +
		       mdesc_size);
	alloc_size = PAGE_ALIGN(handle_size);

	paddr = memblock_alloc(alloc_size, PAGE_SIZE);

	hp = NULL;
	if (paddr) {
		hp = __va(paddr);
		mdesc_handle_init(hp, handle_size, hp);
	}
	return hp;
}

static void __init mdesc_memblock_free(struct mdesc_handle *hp)
{
	unsigned int alloc_size;
	unsigned long start;

	BUG_ON(atomic_read(&hp->refcnt) != 0);
	BUG_ON(!list_empty(&hp->list));

	alloc_size = PAGE_ALIGN(hp->handle_size);
	start = __pa(hp);
	free_bootmem_late(start, alloc_size);
}

static struct mdesc_mem_ops memblock_mdesc_ops = {
	.alloc = mdesc_memblock_alloc,
	.free  = mdesc_memblock_free,
};

static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
{
	unsigned int handle_size;
	struct mdesc_handle *hp;
	unsigned long addr;
	void *base;

	handle_size = (sizeof(struct mdesc_handle) -
		       sizeof(struct mdesc_hdr) +
		       mdesc_size);

	/*
	 * Allocation has to succeed because mdesc update would be missed
	 * and such events are not retransmitted.
	 */
	base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_NOFAIL);
	addr = (unsigned long)base;
	addr = (addr + 15UL) & ~15UL;
	hp = (struct mdesc_handle *) addr;

	mdesc_handle_init(hp, handle_size, base);

	return hp;
}

static void mdesc_kfree(struct mdesc_handle *hp)
{
	BUG_ON(atomic_read(&hp->refcnt) != 0);
	BUG_ON(!list_empty(&hp->list));

	kfree(hp->self_base);
}

static struct mdesc_mem_ops kmalloc_mdesc_memops = {
	.alloc = mdesc_kmalloc,
	.free  = mdesc_kfree,
};

static struct mdesc_handle *mdesc_alloc(unsigned int mdesc_size,
					struct mdesc_mem_ops *mops)
{
	struct mdesc_handle *hp = mops->alloc(mdesc_size);

	if (hp)
		hp->mops = mops;

	return hp;
}

static void mdesc_free(struct mdesc_handle *hp)
{
	hp->mops->free(hp);
}

static struct mdesc_handle *cur_mdesc;
static LIST_HEAD(mdesc_zombie_list);
static DEFINE_SPINLOCK(mdesc_lock);

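/* mdesc_grab() returns the current MD with its reference count bumped;
 * every successful grab must be paired with mdesc_release(), which frees
 * the handle once the last reference goes away.
 */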
struct mdesc_handle *mdesc_grab(void)
{
	struct mdesc_handle *hp;
	unsigned long flags;

	spin_lock_irqsave(&mdesc_lock, flags);
	hp = cur_mdesc;
	if (hp)
		atomic_inc(&hp->refcnt);
	spin_unlock_irqrestore(&mdesc_lock, flags);

	return hp;
}
EXPORT_SYMBOL(mdesc_grab);

void mdesc_release(struct mdesc_handle *hp)
{
	unsigned long flags;

	spin_lock_irqsave(&mdesc_lock, flags);
	if (atomic_dec_and_test(&hp->refcnt)) {
		list_del_init(&hp->list);
		hp->mops->free(hp);
	}
	spin_unlock_irqrestore(&mdesc_lock, flags);
}
EXPORT_SYMBOL(mdesc_release);

static DEFINE_MUTEX(mdesc_mutex);
static struct mdesc_notifier_client *client_list;

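/* Clients are kept on a singly linked list protected by mdesc_mutex; on
 * registration the client's add() callback runs for every existing node
 * matching its node_name.
 */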
void mdesc_register_notifier(struct mdesc_notifier_client *client)
{
	u64 node;

	mutex_lock(&mdesc_mutex);
	client->next = client_list;
	client_list = client;

	mdesc_for_each_node_by_name(cur_mdesc, node, client->node_name)
		client->add(cur_mdesc, node);

	mutex_unlock(&mdesc_mutex);
}

static const u64 *parent_cfg_handle(struct mdesc_handle *hp, u64 node)
{
	const u64 *id;
	u64 a;

	id = NULL;
	mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
		u64 target;

		target = mdesc_arc_target(hp, a);
		id = mdesc_get_property(hp, target,
					"cfg-handle", NULL);
		if (id)
			break;
	}

	return id;
}

/* Run 'func' on nodes which are in A but not in B.  */
static void invoke_on_missing(const char *name,
			      struct mdesc_handle *a,
			      struct mdesc_handle *b,
			      void (*func)(struct mdesc_handle *, u64))
{
	u64 node;

	mdesc_for_each_node_by_name(a, node, name) {
		int found = 0, is_vdc_port = 0;
		const char *name_prop;
		const u64 *id;
		u64 fnode;

		name_prop = mdesc_get_property(a, node, "name", NULL);
		if (name_prop && !strcmp(name_prop, "vdc-port")) {
			is_vdc_port = 1;
			id = parent_cfg_handle(a, node);
		} else
			id = mdesc_get_property(a, node, "id", NULL);

		if (!id) {
			printk(KERN_ERR "MD: Cannot find ID for %s node.\n",
			       (name_prop ? name_prop : name));
			continue;
		}

		mdesc_for_each_node_by_name(b, fnode, name) {
			const u64 *fid;

			if (is_vdc_port) {
				name_prop = mdesc_get_property(b, fnode,
							       "name", NULL);
				if (!name_prop ||
				    strcmp(name_prop, "vdc-port"))
					continue;
				fid = parent_cfg_handle(b, fnode);
				if (!fid) {
					printk(KERN_ERR "MD: Cannot find ID "
					       "for vdc-port node.\n");
					continue;
				}
			} else
				fid = mdesc_get_property(b, fnode,
							 "id", NULL);

			if (*id == *fid) {
				found = 1;
				break;
			}
		}
		if (!found)
			func(a, node);
	}
}

static void notify_one(struct mdesc_notifier_client *p,
		       struct mdesc_handle *old_hp,
		       struct mdesc_handle *new_hp)
{
	invoke_on_missing(p->node_name, old_hp, new_hp, p->remove);
	invoke_on_missing(p->node_name, new_hp, old_hp, p->add);
}

static void mdesc_notify_clients(struct mdesc_handle *old_hp,
				 struct mdesc_handle *new_hp)
{
	struct mdesc_notifier_client *p = client_list;

	while (p) {
		notify_one(p, old_hp, new_hp);
		p = p->next;
	}
}

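/* Fetch a fresh MD from the hypervisor after an update event, swap it in
 * as cur_mdesc, and tell registered clients which nodes appeared or
 * disappeared relative to the previous MD.
 */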
void mdesc_update(void)
{
	unsigned long len, real_len, status;
	struct mdesc_handle *hp, *orig_hp;
	unsigned long flags;

	mutex_lock(&mdesc_mutex);

	(void) sun4v_mach_desc(0UL, 0UL, &len);

	hp = mdesc_alloc(len, &kmalloc_mdesc_memops);
	if (!hp) {
		printk(KERN_ERR "MD: mdesc alloc fails\n");
		goto out;
	}

	status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
	if (status != HV_EOK || real_len > len) {
		printk(KERN_ERR "MD: mdesc reread fails with %lu\n",
		       status);
		atomic_dec(&hp->refcnt);
		mdesc_free(hp);
		goto out;
	}

	spin_lock_irqsave(&mdesc_lock, flags);
	orig_hp = cur_mdesc;
	cur_mdesc = hp;
	spin_unlock_irqrestore(&mdesc_lock, flags);

	mdesc_notify_clients(orig_hp, hp);

	spin_lock_irqsave(&mdesc_lock, flags);
	if (atomic_dec_and_test(&orig_hp->refcnt))
		mdesc_free(orig_hp);
	else
		list_add(&orig_hp->list, &mdesc_zombie_list);
	spin_unlock_irqrestore(&mdesc_lock, flags);

out:
	mutex_unlock(&mdesc_mutex);
}

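/* The node, name, and data blocks sit back to back immediately after the
 * mdesc_hdr, in that order.
 */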
static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
{
	return (struct mdesc_elem *) (mdesc + 1);
}

static void *name_block(struct mdesc_hdr *mdesc)
{
	return ((void *) node_block(mdesc)) + mdesc->node_sz;
}

static void *data_block(struct mdesc_hdr *mdesc)
{
	return ((void *) name_block(mdesc)) + mdesc->name_sz;
}

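/* Walk the node block starting at 'from_node', following each node's
 * next-node index, and return the first node whose name matches, or
 * MDESC_NODE_NULL.
 */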
u64 mdesc_node_by_name(struct mdesc_handle *hp,
		       u64 from_node, const char *name)
{
	struct mdesc_elem *ep = node_block(&hp->mdesc);
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;
	u64 ret;

	if (from_node == MDESC_NODE_NULL) {
		ret = from_node = 0;
	} else if (from_node >= last_node) {
		return MDESC_NODE_NULL;
	} else {
		ret = ep[from_node].d.val;
	}

	while (ret < last_node) {
		if (ep[ret].tag != MD_NODE)
			return MDESC_NODE_NULL;
		if (!strcmp(names + ep[ret].name_offset, name))
			break;
		ret = ep[ret].d.val;
	}
	if (ret >= last_node)
		ret = MDESC_NODE_NULL;
	return ret;
}
EXPORT_SYMBOL(mdesc_node_by_name);
const void *mdesc_get_property(struct mdesc_handle *hp, u64 node,
			       const char *name, int *lenp)
{
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;
	void *data = data_block(&hp->mdesc);
	struct mdesc_elem *ep;
	if (node == MDESC_NODE_NULL || node >= last_node)
		return NULL;
	ep = node_block(&hp->mdesc) + node;
	ep++;
	for (; ep->tag != MD_NODE_END; ep++) {
		void *val = NULL;
		int len = 0;

		switch (ep->tag) {
		case MD_PROP_VAL:
			val = &ep->d.val;
			len = 8;
			break;

		case MD_PROP_STR:
		case MD_PROP_DATA:
			val = data + ep->d.data.data_offset;
			len = ep->d.data.data_len;
			break;

		default:
			break;
		}
		if (!val)
			continue;
		if (!strcmp(names + ep->name_offset, name)) {
			if (lenp)
				*lenp = len;
			return val;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(mdesc_get_property);
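/* Starting just past 'from' (a node index or a previously returned arc),
 * scan forward within the node for the next ARC property of the given
 * type and return its element index, or MDESC_NODE_NULL.
 */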
u64 mdesc_next_arc(struct mdesc_handle *hp, u64 from, const char *arc_type)
{
	struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;
	if (from == MDESC_NODE_NULL || from >= last_node)
		return MDESC_NODE_NULL;
	ep = base + from;

	ep++;
	for (; ep->tag != MD_NODE_END; ep++) {
		if (ep->tag != MD_PROP_ARC)
			continue;

		if (strcmp(names + ep->name_offset, arc_type))
			continue;

		return ep - base;
	}

	return MDESC_NODE_NULL;
}
EXPORT_SYMBOL(mdesc_next_arc);

u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc)
{
	struct mdesc_elem *ep, *base = node_block(&hp->mdesc);

	ep = base + arc;

	return ep->d.val;
}
EXPORT_SYMBOL(mdesc_arc_target);

const char *mdesc_node_name(struct mdesc_handle *hp, u64 node)
{
	struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;

	if (node == MDESC_NODE_NULL || node >= last_node)
		return NULL;

	ep = base + node;
	if (ep->tag != MD_NODE)
		return NULL;

	return names + ep->name_offset;
}
EXPORT_SYMBOL(mdesc_node_name);
static u64 max_cpus = 64;

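/* Print the interesting properties of the "platform" MD node and use its
 * "max-cpus" value (when present) to size the possible cpu map.
 */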
static void __init report_platform_properties(void)
{
	struct mdesc_handle *hp = mdesc_grab();
	u64 pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
	const char *s;
	const u64 *v;

	if (pn == MDESC_NODE_NULL) {
		prom_printf("No platform node in machine-description.\n");
		prom_halt();
	}

	s = mdesc_get_property(hp, pn, "banner-name", NULL);
	printk("PLATFORM: banner-name [%s]\n", s);
	s = mdesc_get_property(hp, pn, "name", NULL);
	printk("PLATFORM: name [%s]\n", s);

	v = mdesc_get_property(hp, pn, "hostid", NULL);
	if (v)
		printk("PLATFORM: hostid [%08llx]\n", *v);
	v = mdesc_get_property(hp, pn, "serial#", NULL);
	if (v)
		printk("PLATFORM: serial# [%08llx]\n", *v);
	v = mdesc_get_property(hp, pn, "stick-frequency", NULL);
	printk("PLATFORM: stick-frequency [%08llx]\n", *v);
	v = mdesc_get_property(hp, pn, "mac-address", NULL);
	if (v)
		printk("PLATFORM: mac-address [%llx]\n", *v);
	v = mdesc_get_property(hp, pn, "watchdog-resolution", NULL);
	if (v)
		printk("PLATFORM: watchdog-resolution [%llu ms]\n", *v);
	v = mdesc_get_property(hp, pn, "watchdog-max-timeout", NULL);
	if (v)
		printk("PLATFORM: watchdog-max-timeout [%llu ms]\n", *v);
	v = mdesc_get_property(hp, pn, "max-cpus", NULL);
	if (v) {
		max_cpus = *v;
		printk("PLATFORM: max-cpus [%llu]\n", max_cpus);
	}

#ifdef CONFIG_SMP
	{
		int max_cpu, i;

		if (v) {
			max_cpu = *v;
			if (max_cpu > NR_CPUS)
				max_cpu = NR_CPUS;
		} else {
			max_cpu = NR_CPUS;
		}
		for (i = 0; i < max_cpu; i++)
			set_cpu_possible(i, true);
	}
#endif

	mdesc_release(hp);
}

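/* Fill in L1 instruction/data or L2 cache geometry for a cpu from a
 * "cache" MD node, recursing into forward-linked caches below an L1 node.
 */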
static void fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp)
{
	const u64 *level = mdesc_get_property(hp, mp, "level", NULL);
	const u64 *size = mdesc_get_property(hp, mp, "size", NULL);
	const u64 *line_size = mdesc_get_property(hp, mp, "line-size", NULL);
	const char *type;
	int type_len;

	type = mdesc_get_property(hp, mp, "type", &type_len);

	switch (*level) {
	case 1:
		if (of_find_in_proplist(type, "instn", type_len)) {
			c->icache_size = *size;
			c->icache_line_size = *line_size;
		} else if (of_find_in_proplist(type, "data", type_len)) {
			c->dcache_size = *size;
			c->dcache_line_size = *line_size;
		}
		break;

	case 2:
		c->ecache_size = *size;
		c->ecache_line_size = *line_size;
		break;

	default:
		break;
	}

	if (*level == 1) {
		u64 a;

		mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
			u64 target = mdesc_arc_target(hp, a);
			const char *name = mdesc_node_name(hp, target);
			if (!strcmp(name, "cache"))
				fill_in_one_cache(c, hp, target);
		}
	}
}

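/* Follow back arcs from 'node', invoking 'func' on every target named
 * 'srch_val', recursing at most 'depth' levels.
 */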
static void find_back_node_value(struct mdesc_handle *hp, u64 node,
				 char *srch_val,
				 void (*func)(struct mdesc_handle *, u64, int),
				 u64 val, int depth)
{
	u64 arc;

	/* Since we have an estimate of recursion depth, do a sanity check. */
	if (depth == 0)
		return;

	mdesc_for_each_arc(arc, hp, node, MDESC_ARC_TYPE_BACK) {
		u64 n = mdesc_arc_target(hp, arc);
		const char *name = mdesc_node_name(hp, n);

		if (!strcmp(srch_val, name))
			(*func)(hp, n, val);

		find_back_node_value(hp, n, srch_val, func, val, depth-1);
	}
}

static void __mark_core_id(struct mdesc_handle *hp, u64 node,
			   int core_id)
{
	const u64 *id = mdesc_get_property(hp, node, "id", NULL);

	if (*id < num_possible_cpus())
		cpu_data(*id).core_id = core_id;
}

static void __mark_max_cache_id(struct mdesc_handle *hp, u64 node,
				int max_cache_id)
{
	const u64 *id = mdesc_get_property(hp, node, "id", NULL);

	if (*id < num_possible_cpus()) {
		cpu_data(*id).max_cache_id = max_cache_id;

		/**
		 * On systems without explicit socket descriptions socket
		 * is max_cache_id
		 */
		cpu_data(*id).sock_id = max_cache_id;
	}
}

static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
			  int core_id)
{
	find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10);
}

static void mark_max_cache_ids(struct mdesc_handle *hp, u64 mp,
			       int max_cache_id)
{
	find_back_node_value(hp, mp, "cpu", __mark_max_cache_id,
			     max_cache_id, 10);
}

static void set_core_ids(struct mdesc_handle *hp)
{
	int idx;
	u64 mp;

	idx = 1;

	/* Identify unique cores by looking for cpus backpointed to by
	 * level 1 instruction caches.
	 */
	mdesc_for_each_node_by_name(hp, mp, "cache") {
		const u64 *level;
		const char *type;
		int len;

		level = mdesc_get_property(hp, mp, "level", NULL);
		if (*level != 1)
			continue;

		type = mdesc_get_property(hp, mp, "type", &len);
		if (!of_find_in_proplist(type, "instn", len))
			continue;

		mark_core_ids(hp, mp, idx);
		idx++;
	}
}

static int set_max_cache_ids_by_cache(struct mdesc_handle *hp, int level)
{
	u64 mp;
	int idx = 1;
	int fnd = 0;

	/**
	 * Identify unique highest level of shared cache by looking for cpus
	 * backpointed to by shared level N caches.
	 */
	mdesc_for_each_node_by_name(hp, mp, "cache") {
		const u64 *cur_lvl;

		cur_lvl = mdesc_get_property(hp, mp, "level", NULL);
		if (*cur_lvl != level)
			continue;
		mark_max_cache_ids(hp, mp, idx);
		idx++;
		fnd = 1;
	}
	return fnd;
}

static void set_sock_ids_by_socket(struct mdesc_handle *hp, u64 mp)
{
	int idx = 1;
	mdesc_for_each_node_by_name(hp, mp, "socket") {
		u64 a;

		mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
			u64 t = mdesc_arc_target(hp, a);
			const char *name;
			const u64 *id;

			name = mdesc_node_name(hp, t);
			if (strcmp(name, "cpu"))
				continue;

			id = mdesc_get_property(hp, t, "id", NULL);
			if (*id < num_possible_cpus())
				cpu_data(*id).sock_id = idx;
		}
		idx++;
	}
}

static void set_sock_ids(struct mdesc_handle *hp)
{
	u64 mp;

	/**
	 * Find the highest level of shared cache which pre-T7 is also
	 * the socket.
	 */
	if (!set_max_cache_ids_by_cache(hp, 3))
		set_max_cache_ids_by_cache(hp, 2);

	/* If machine description exposes sockets data use it. */
	mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets");
	if (mp != MDESC_NODE_NULL)
		set_sock_ids_by_socket(hp, mp);
}

static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
{
	u64 a;

	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
		u64 t = mdesc_arc_target(hp, a);
		const char *name;
		const u64 *id;

		name = mdesc_node_name(hp, t);
		if (strcmp(name, "cpu"))
			continue;

		id = mdesc_get_property(hp, t, "id", NULL);
		if (*id < NR_CPUS)
			cpu_data(*id).proc_id = proc_id;
	}
}

static void __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
{
	int idx;
	u64 mp;

	idx = 0;
	mdesc_for_each_node_by_name(hp, mp, exec_unit_name) {
		const char *type;
		int len;

		type = mdesc_get_property(hp, mp, "type", &len);
		if (!of_find_in_proplist(type, "int", len) &&
		    !of_find_in_proplist(type, "integer", len))
			continue;

		mark_proc_ids(hp, mp, idx);
		idx++;
	}
}

static void set_proc_ids(struct mdesc_handle *hp)
{
	__set_proc_ids(hp, "exec_unit");
	__set_proc_ids(hp, "exec-unit");
}

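/* A queue mask is ((1U << #bits) * 64U) - 1U.  Fall back to 'def' when the
 * property is missing or out of range, and clamp the value to 'max'.
 */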
static void get_one_mondo_bits(const u64 *p, unsigned int *mask,
			       unsigned long def, unsigned long max)
{
	u64 val;

	if (!p)
		goto use_default;
	val = *p;

	if (!val || val >= 64)
		goto use_default;

	if (val > max)
		val = max;

	*mask = ((1U << val) * 64U) - 1U;
	return;

use_default:
	*mask = ((1U << def) * 64U) - 1U;
}

static void get_mondo_data(struct mdesc_handle *hp, u64 mp,
			   struct trap_per_cpu *tb)
{
	static int printed;
	const u64 *val;

	val = mdesc_get_property(hp, mp, "q-cpu-mondo-#bits", NULL);
	get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7, ilog2(max_cpus * 2));

	val = mdesc_get_property(hp, mp, "q-dev-mondo-#bits", NULL);
	get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7, 8);

	val = mdesc_get_property(hp, mp, "q-resumable-#bits", NULL);
	get_one_mondo_bits(val, &tb->resum_qmask, 6, 7);

	val = mdesc_get_property(hp, mp, "q-nonresumable-#bits", NULL);
	get_one_mondo_bits(val, &tb->nonresum_qmask, 2, 2);
	if (!printed++) {
		pr_info("SUN4V: Mondo queue sizes "
			"[cpu(%u) dev(%u) r(%u) nr(%u)]\n",
			tb->cpu_mondo_qmask + 1,
			tb->dev_mondo_qmask + 1,
			tb->resum_qmask + 1,
			tb->nonresum_qmask + 1);
	}
}

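/* Invoke 'func' on every "cpu" node in the MD, stopping early if it
 * returns non-NULL.  Under CONFIG_SMP, cpus outside 'mask' or with ids
 * >= NR_CPUS are skipped.
 */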
static void *mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask)
{
	struct mdesc_handle *hp = mdesc_grab();
	void *ret = NULL;
	u64 mp;

	mdesc_for_each_node_by_name(hp, mp, "cpu") {
		const u64 *id = mdesc_get_property(hp, mp, "id", NULL);
		int cpuid = *id;

#ifdef CONFIG_SMP
		if (cpuid >= NR_CPUS) {
			printk(KERN_WARNING "Ignoring CPU %d which is "
			       ">= NR_CPUS (%d)\n",
			       cpuid, NR_CPUS);
			continue;
		}
		if (!cpumask_test_cpu(cpuid, mask))
			continue;
#endif

		ret = func(hp, mp, cpuid, arg);
		if (ret)
			goto out;
	}
out:
	mdesc_release(hp);
	return ret;
}

static void *record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid,
			    void *arg)
{
	ncpus_probed++;
#ifdef CONFIG_SMP
	set_cpu_present(cpuid, true);
#endif
	return NULL;
}
void mdesc_populate_present_mask(cpumask_t *mask)
{
	if (tlb_type != hypervisor)
		return;
	ncpus_probed = 0;
	mdesc_iterate_over_cpus(record_one_cpu, NULL, mask);
}
static void * __init check_one_pgsz(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
{
	const u64 *pgsz_prop = mdesc_get_property(hp, mp, "mmu-page-size-list", NULL);
	unsigned long *pgsz_mask = arg;
	u64 val;

	val = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
	       HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
	if (pgsz_prop)
		val = *pgsz_prop;

	if (!*pgsz_mask)
		*pgsz_mask = val;
	else
		*pgsz_mask &= val;
	return NULL;
}

void __init mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask)
{
	*pgsz_mask = 0;
	mdesc_iterate_over_cpus(check_one_pgsz, pgsz_mask, mask);
}

static void *fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid,
			     void *arg)
{
	const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
	struct trap_per_cpu *tb;
	cpuinfo_sparc *c;
	u64 a;
#ifndef CONFIG_SMP
	/* On uniprocessor we only want the values for the
	 * real physical cpu the kernel booted onto, however
	 * cpu_data() only has one entry at index 0.
	 */
	if (cpuid != real_hard_smp_processor_id())
		return NULL;
	cpuid = 0;
#endif

	c = &cpu_data(cpuid);
	c->clock_tick = *cfreq;

	tb = &trap_block[cpuid];
	get_mondo_data(hp, mp, tb);

	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
		u64 j, t = mdesc_arc_target(hp, a);
		const char *t_name;

		t_name = mdesc_node_name(hp, t);
		if (!strcmp(t_name, "cache")) {
			fill_in_one_cache(c, hp, t);
			continue;
		}

		mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) {
			u64 n = mdesc_arc_target(hp, j);
			const char *n_name;
			n_name = mdesc_node_name(hp, n);
			if (!strcmp(n_name, "cache"))
				fill_in_one_cache(c, hp, n);
		}
	}

	c->core_id = 0;
	c->proc_id = -1;

	return NULL;
}

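/* Populate per-cpu clock tick, mondo queue, and cache data for every cpu
 * in 'mask', then derive core, processor, and socket ids from the MD.
 */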
void mdesc_fill_in_cpu_data(cpumask_t *mask)
{
	struct mdesc_handle *hp;

	mdesc_iterate_over_cpus(fill_in_one_cpu, NULL, mask);

	hp = mdesc_grab();

	set_core_ids(hp);
	set_proc_ids(hp);
	set_sock_ids(hp);
	mdesc_release(hp);

	smp_fill_in_sib_core_maps();
}

/* mdesc_open() - Grab a reference to mdesc_handle when /dev/mdesc is
 * opened. Hold this reference until /dev/mdesc is closed to ensure
 * mdesc data structure is not released underneath us. Store the
 * pointer to mdesc structure in private_data for read and seek to use
 */
static int mdesc_open(struct inode *inode, struct file *file)
{
	struct mdesc_handle *hp = mdesc_grab();

	if (!hp)
		return -ENODEV;

	file->private_data = hp;

	return 0;
}

static ssize_t mdesc_read(struct file *file, char __user *buf,
			  size_t len, loff_t *offp)
{
	struct mdesc_handle *hp = file->private_data;
	unsigned char *mdesc;
	int bytes_left, count = len;

	if (*offp >= hp->handle_size)
		return 0;

	bytes_left = hp->handle_size - *offp;
	if (count > bytes_left)
		count = bytes_left;

	mdesc = (unsigned char *)&hp->mdesc;
	mdesc += *offp;
	if (!copy_to_user(buf, mdesc, count)) {
		*offp += count;
		return count;
	} else {
		return -EFAULT;
	}
}
static loff_t mdesc_llseek(struct file *file, loff_t offset, int whence)
{
	struct mdesc_handle *hp = file->private_data;
	return no_seek_end_llseek_size(file, offset, whence, hp->handle_size);
}

/* mdesc_close() - /dev/mdesc is being closed, release the reference to
 * mdesc structure.
 */
static int mdesc_close(struct inode *inode, struct file *file)
{
	mdesc_release(file->private_data);
	return 0;
}

static const struct file_operations mdesc_fops = {
	.open    = mdesc_open,
	.read	 = mdesc_read,
	.llseek  = mdesc_llseek,
	.release = mdesc_close,
	.owner	 = THIS_MODULE,
};

static struct miscdevice mdesc_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "mdesc",
	.fops	= &mdesc_fops,
};

static int __init mdesc_misc_init(void)
{
	return misc_register(&mdesc_misc);
}

__initcall(mdesc_misc_init);

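/* Boot-time entry point: fetch the initial MD from the hypervisor into a
 * memblock-backed handle and report the platform properties.
 */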
void __init sun4v_mdesc_init(void)
{
	struct mdesc_handle *hp;
	unsigned long len, real_len, status;

	(void) sun4v_mach_desc(0UL, 0UL, &len);

	printk("MDESC: Size is %lu bytes.\n", len);

	hp = mdesc_alloc(len, &memblock_mdesc_ops);
	if (hp == NULL) {
		prom_printf("MDESC: alloc of %lu bytes failed.\n", len);
		prom_halt();
	}
	status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
	if (status != HV_EOK || real_len > len) {
		prom_printf("sun4v_mach_desc fails, err(%lu), "
			    "len(%lu), real_len(%lu)\n",
			    status, len, real_len);
		mdesc_free(hp);
		prom_halt();
	}

	cur_mdesc = hp;

	report_platform_properties();
}