#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i8253.h>
#include <linux/slab.h>
#include <linux/hpet.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/pm.h>
#include <linux/io.h>

#include <asm/fixmap.h>
#include <asm/hpet.h>
#include <asm/time.h>

#define HPET_MASK			CLOCKSOURCE_MASK(32)

/* FSEC = 10^-15
   NSEC = 10^-9 */
#define FSEC_PER_NSEC			1000000L

#define HPET_DEV_USED_BIT		2
#define HPET_DEV_USED			(1 << HPET_DEV_USED_BIT)
#define HPET_DEV_VALID			0x8
#define HPET_DEV_FSB_CAP		0x1000
#define HPET_DEV_PERI_CAP		0x2000

#define HPET_MIN_CYCLES			128
#define HPET_MIN_PROG_DELTA		(HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))

/*
 * HPET address is set in acpi/boot.c, when an ACPI entry exists
 */
unsigned long				hpet_address;
u8					hpet_blockid; /* OS timer block num */
u8					hpet_msi_disable;

#ifdef CONFIG_PCI_MSI
static unsigned long			hpet_num_timers;
#endif
static void __iomem			*hpet_virt_address;

struct hpet_dev {
	struct clock_event_device	evt;
	unsigned int			num;
	int				cpu;
	unsigned int			irq;
	unsigned int			flags;
	char				name[10];
};

inline struct hpet_dev *EVT_TO_HPET_DEV(struct clock_event_device *evtdev)
{
	return container_of(evtdev, struct hpet_dev, evt);
}

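/*
 * MMIO accessors for the HPET register block mapped at hpet_virt_address;
 * all registers are accessed as 32-bit words relative to that base.
 */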
inline unsigned int hpet_readl(unsigned int a)
{
	return readl(hpet_virt_address + a);
}

static inline void hpet_writel(unsigned int d, unsigned int a)
{
	writel(d, hpet_virt_address + a);
}

#ifdef CONFIG_X86_64
#include <asm/pgtable.h>
#endif

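/*
 * Map the HPET MMIO range. On 64-bit kernels the block is also exposed
 * through the VSYSCALL_HPET fixmap so the vsyscall/vDSO clock code can
 * read the counter without entering the kernel.
 */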
static inline void hpet_set_mapping(void)
{
	hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
#ifdef CONFIG_X86_64
	__set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VVAR_NOCACHE);
#endif
}

static inline void hpet_clear_mapping(void)
{
	iounmap(hpet_virt_address);
	hpet_virt_address = NULL;
}

/*
 * HPET command line enable / disable
 */
int boot_hpet_disable;
int hpet_force_user;
static int hpet_verbose;

static int __init hpet_setup(char *str)
{
	while (str) {
		char *next = strchr(str, ',');

		if (next)
			*next++ = 0;
		if (!strncmp("disable", str, 7))
			boot_hpet_disable = 1;
		if (!strncmp("force", str, 5))
			hpet_force_user = 1;
		if (!strncmp("verbose", str, 7))
			hpet_verbose = 1;
		str = next;
	}
	return 1;
}
__setup("hpet=", hpet_setup);

static int __init disable_hpet(char *str)
{
	boot_hpet_disable = 1;
	return 1;
}
__setup("nohpet", disable_hpet);

static inline int is_hpet_capable(void)
{
	return !boot_hpet_disable && hpet_address;
}

/*
 * HPET timer interrupt enable / disable
 */
static int hpet_legacy_int_enabled;

/**
 * is_hpet_enabled - check whether the hpet timer interrupt is enabled
 */
int is_hpet_enabled(void)
{
	return is_hpet_capable() && hpet_legacy_int_enabled;
}
EXPORT_SYMBOL_GPL(is_hpet_enabled);

static void _hpet_print_config(const char *function, int line)
{
	u32 i, timers, l, h;
	printk(KERN_INFO "hpet: %s(%d):\n", function, line);
	l = hpet_readl(HPET_ID);
	h = hpet_readl(HPET_PERIOD);
	timers = ((l & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
	printk(KERN_INFO "hpet: ID: 0x%x, PERIOD: 0x%x\n", l, h);
	l = hpet_readl(HPET_CFG);
	h = hpet_readl(HPET_STATUS);
	printk(KERN_INFO "hpet: CFG: 0x%x, STATUS: 0x%x\n", l, h);
	l = hpet_readl(HPET_COUNTER);
	h = hpet_readl(HPET_COUNTER+4);
	printk(KERN_INFO "hpet: COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h);

	for (i = 0; i < timers; i++) {
		l = hpet_readl(HPET_Tn_CFG(i));
		h = hpet_readl(HPET_Tn_CFG(i)+4);
		printk(KERN_INFO "hpet: T%d: CFG_l: 0x%x, CFG_h: 0x%x\n",
		       i, l, h);
		l = hpet_readl(HPET_Tn_CMP(i));
		h = hpet_readl(HPET_Tn_CMP(i)+4);
		printk(KERN_INFO "hpet: T%d: CMP_l: 0x%x, CMP_h: 0x%x\n",
		       i, l, h);
		l = hpet_readl(HPET_Tn_ROUTE(i));
		h = hpet_readl(HPET_Tn_ROUTE(i)+4);
		printk(KERN_INFO "hpet: T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n",
		       i, l, h);
	}
}

#define hpet_print_config()					\
do {								\
	if (hpet_verbose)					\
		_hpet_print_config(__func__, __LINE__);	\
} while (0)

/*
 * When the hpet driver (/dev/hpet) is enabled, we need to reserve
 * timer 0 and timer 1 in case of RTC emulation.
 */
#ifdef CONFIG_HPET

static void hpet_reserve_msi_timers(struct hpet_data *hd);

static void hpet_reserve_platform_timers(unsigned int id)
{
	struct hpet __iomem *hpet = hpet_virt_address;
	struct hpet_timer __iomem *timer = &hpet->hpet_timers[2];
	unsigned int nrtimers, i;
	struct hpet_data hd;

	nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;

	memset(&hd, 0, sizeof(hd));
	hd.hd_phys_address	= hpet_address;
	hd.hd_address		= hpet;
	hd.hd_nirqs		= nrtimers;
	hpet_reserve_timer(&hd, 0);

#ifdef CONFIG_HPET_EMULATE_RTC
	hpet_reserve_timer(&hd, 1);
#endif

	/*
	 * NOTE that hd_irq[] reflects IOAPIC input pins (LEGACY_8254
	 * is wrong for i8259!) not the output IRQ.  Many BIOS writers
	 * don't bother configuring *any* comparator interrupts.
	 */
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;

	for (i = 2; i < nrtimers; timer++, i++) {
		hd.hd_irq[i] = (readl(&timer->hpet_config) &
			Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;
	}

	hpet_reserve_msi_timers(&hd);

	hpet_alloc(&hd);

}
#else
static void hpet_reserve_platform_timers(unsigned int id) { }
#endif

/*
 * Common hpet info
 */
static unsigned long hpet_freq;

static void hpet_legacy_set_mode(enum clock_event_mode mode,
			  struct clock_event_device *evt);
static int hpet_legacy_next_event(unsigned long delta,
			   struct clock_event_device *evt);

/*
 * The hpet clock event device
 */
static struct clock_event_device hpet_clockevent = {
	.name		= "hpet",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= hpet_legacy_set_mode,
	.set_next_event = hpet_legacy_next_event,
	.irq		= 0,
	.rating		= 50,
};

static void hpet_stop_counter(void)
{
	unsigned long cfg = hpet_readl(HPET_CFG);
	cfg &= ~HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);
}

static void hpet_reset_counter(void)
{
	hpet_writel(0, HPET_COUNTER);
	hpet_writel(0, HPET_COUNTER + 4);
}

static void hpet_start_counter(void)
{
	unsigned int cfg = hpet_readl(HPET_CFG);
	cfg |= HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);
}

static void hpet_restart_counter(void)
{
	hpet_stop_counter();
	hpet_reset_counter();
	hpet_start_counter();
}

static void hpet_resume_device(void)
{
	force_hpet_resume();
}

static void hpet_resume_counter(struct clocksource *cs)
{
	hpet_resume_device();
	hpet_restart_counter();
}

static void hpet_enable_legacy_int(void)
{
	unsigned int cfg = hpet_readl(HPET_CFG);

	cfg |= HPET_CFG_LEGACY;
	hpet_writel(cfg, HPET_CFG);
	hpet_legacy_int_enabled = 1;
}

static void hpet_legacy_clockevent_register(void)
{
	/* Start HPET legacy interrupts */
	hpet_enable_legacy_int();

	/*
	 * Start hpet with the boot cpu mask and make it
	 * global after the IO_APIC has been initialized.
	 */
	hpet_clockevent.cpumask = cpumask_of(smp_processor_id());
	clockevents_config_and_register(&hpet_clockevent, hpet_freq,
					HPET_MIN_PROG_DELTA, 0x7FFFFFFF);
	global_clock_event = &hpet_clockevent;
	printk(KERN_DEBUG "hpet clockevent registered\n");
}

static int hpet_setup_msi_irq(unsigned int irq);

static void hpet_set_mode(enum clock_event_mode mode,
			  struct clock_event_device *evt, int timer)
{
	unsigned int cfg, cmp, now;
	uint64_t delta;

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		hpet_stop_counter();
		delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
		delta >>= evt->shift;
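		/*
		 * delta is now the tick period (NSEC_PER_SEC/HZ ns)
		 * converted to HPET cycles via the clockevent
		 * mult/shift factors.
		 */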
		now = hpet_readl(HPET_COUNTER);
		cmp = now + (unsigned int) delta;
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
		       HPET_TN_SETVAL | HPET_TN_32BIT;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		hpet_writel(cmp, HPET_Tn_CMP(timer));
		udelay(1);
		/*
		 * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
		 * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
		 * bit is automatically cleared after the first write.
		 * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
		 * Publication # 24674)
		 */
		hpet_writel((unsigned int) delta, HPET_Tn_CMP(timer));
		hpet_start_counter();
		hpet_print_config();
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		cfg &= ~HPET_TN_PERIODIC;
		cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		cfg &= ~HPET_TN_ENABLE;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		break;

	case CLOCK_EVT_MODE_RESUME:
		if (timer == 0) {
			hpet_enable_legacy_int();
		} else {
			struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
			hpet_setup_msi_irq(hdev->irq);
			disable_irq(hdev->irq);
			irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
			enable_irq(hdev->irq);
		}
		hpet_print_config();
		break;
	}
}

static int hpet_next_event(unsigned long delta,
			   struct clock_event_device *evt, int timer)
{
	u32 cnt;
	s32 res;

	cnt = hpet_readl(HPET_COUNTER);
	cnt += (u32) delta;
	hpet_writel(cnt, HPET_Tn_CMP(timer));

	/*
	 * HPETs are a complete disaster. The compare register is
	 * based on an equal comparison and neither provides a less
	 * than or equal functionality (which would require taking
	 * the wraparound into account) nor a simple count down event
	 * mode. Further the write to the comparator register is
	 * delayed internally up to two HPET clock cycles in certain
	 * chipsets (ATI, ICH9,10). Some newer AMD chipsets have even
	 * longer delays. We worked around that by reading back the
	 * compare register, but that required another workaround for
	 * ICH9,10 chips where the first readout after write can
	 * return the old stale value. We already had a minimum
	 * programming delta of 5us enforced, but a NMI or SMI hitting
	 * between the counter readout and the comparator write can
	 * move us behind that point easily. Now instead of reading
	 * the compare register back several times, we make the ETIME
	 * decision based on the following: Return ETIME if the
	 * counter value after the write is less than HPET_MIN_CYCLES
	 * away from the event or if the counter is already ahead of
	 * the event. The minimum programming delta for the generic
	 * clockevents code is set to 1.5 * HPET_MIN_CYCLES.
	 */
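	/*
	 * Worked example: if an NMI/SMI delayed us and the counter has
	 * already passed cnt by the time we re-read it below, cnt - counter
	 * goes negative, res < HPET_MIN_CYCLES, and we return -ETIME so the
	 * caller retries with a larger delta.
	 */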
	res = (s32)(cnt - hpet_readl(HPET_COUNTER));

	return res < HPET_MIN_CYCLES ? -ETIME : 0;
}

static void hpet_legacy_set_mode(enum clock_event_mode mode,
			struct clock_event_device *evt)
{
	hpet_set_mode(mode, evt, 0);
}

static int hpet_legacy_next_event(unsigned long delta,
			struct clock_event_device *evt)
{
	return hpet_next_event(delta, evt, 0);
}

/*
 * HPET MSI Support
 */
#ifdef CONFIG_PCI_MSI

static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev);
static struct hpet_dev	*hpet_devs;

void hpet_msi_unmask(struct irq_data *data)
{
	struct hpet_dev *hdev = data->handler_data;
	unsigned int cfg;

	/* unmask it */
	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
	cfg |= HPET_TN_ENABLE | HPET_TN_FSB;
	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
}

void hpet_msi_mask(struct irq_data *data)
{
	struct hpet_dev *hdev = data->handler_data;
	unsigned int cfg;

	/* mask it */
	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
	cfg &= ~(HPET_TN_ENABLE | HPET_TN_FSB);
	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
}

void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg)
{
	hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num));
	hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4);
}

void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg)
{
	msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num));
	msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4);
	msg->address_hi = 0;
}

static void hpet_msi_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
	hpet_set_mode(mode, evt, hdev->num);
}

static int hpet_msi_next_event(unsigned long delta,
				struct clock_event_device *evt)
{
	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
	return hpet_next_event(delta, evt, hdev->num);
}

static int hpet_setup_msi_irq(unsigned int irq)
{
	if (x86_msi.setup_hpet_msi(irq, hpet_blockid)) {
		irq_free_hwirq(irq);
		return -EINVAL;
	}
	return 0;
}

static int hpet_assign_irq(struct hpet_dev *dev)
{
	unsigned int irq = irq_alloc_hwirq(-1);

	if (!irq)
		return -EINVAL;

	irq_set_handler_data(irq, dev);

	if (hpet_setup_msi_irq(irq))
		return -EINVAL;

	dev->irq = irq;
	return 0;
}

static irqreturn_t hpet_interrupt_handler(int irq, void *data)
{
	struct hpet_dev *dev = (struct hpet_dev *)data;
	struct clock_event_device *hevt = &dev->evt;

	if (!hevt->event_handler) {
		printk(KERN_INFO "Spurious HPET timer interrupt on HPET timer %d\n",
				dev->num);
		return IRQ_HANDLED;
	}

	hevt->event_handler(hevt);
	return IRQ_HANDLED;
}

static int hpet_setup_irq(struct hpet_dev *dev)
{

	if (request_irq(dev->irq, hpet_interrupt_handler,
			IRQF_TIMER | IRQF_NOBALANCING,
			dev->name, dev))
		return -1;

	disable_irq(dev->irq);
	irq_set_affinity(dev->irq, cpumask_of(dev->cpu));
	enable_irq(dev->irq);

	printk(KERN_DEBUG "hpet: %s irq %d for MSI\n",
			 dev->name, dev->irq);

	return 0;
}

/* This should be called in specific @cpu */
static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
{
	struct clock_event_device *evt = &hdev->evt;

	WARN_ON(cpu != smp_processor_id());
	if (!(hdev->flags & HPET_DEV_VALID))
		return;

	if (hpet_setup_msi_irq(hdev->irq))
		return;

	hdev->cpu = cpu;
	per_cpu(cpu_hpet_dev, cpu) = hdev;
	evt->name = hdev->name;
	hpet_setup_irq(hdev);
	evt->irq = hdev->irq;

	evt->rating = 110;
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	if (hdev->flags & HPET_DEV_PERI_CAP)
		evt->features |= CLOCK_EVT_FEAT_PERIODIC;

	evt->set_mode = hpet_msi_set_mode;
	evt->set_next_event = hpet_msi_next_event;
	evt->cpumask = cpumask_of(hdev->cpu);

	clockevents_config_and_register(evt, hpet_freq, HPET_MIN_PROG_DELTA,
					0x7FFFFFFF);
}

#ifdef CONFIG_HPET
/* Reserve at least one timer for userspace (/dev/hpet) */
#define RESERVE_TIMERS 1
#else
#define RESERVE_TIMERS 0
#endif

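/*
 * Scan the comparators starting at @start_timer for FSB/MSI delivery
 * capability and hand one to each possible CPU. These per-cpu MSI timers
 * are only set up when the local APIC timer stops in deep C-states,
 * i.e. when X86_FEATURE_ARAT is not available.
 */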
static void hpet_msi_capability_lookup(unsigned int start_timer)
{
	unsigned int id;
	unsigned int num_timers;
	unsigned int num_timers_used = 0;
	int i;

	if (hpet_msi_disable)
		return;

	if (boot_cpu_has(X86_FEATURE_ARAT))
		return;
	id = hpet_readl(HPET_ID);

	num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
	num_timers++; /* Value read out starts from 0 */
	hpet_print_config();

	hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL);
	if (!hpet_devs)
		return;

	hpet_num_timers = num_timers;

	for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) {
		struct hpet_dev *hdev = &hpet_devs[num_timers_used];
		unsigned int cfg = hpet_readl(HPET_Tn_CFG(i));

		/* Only consider HPET timer with MSI support */
		if (!(cfg & HPET_TN_FSB_CAP))
			continue;

		hdev->flags = 0;
		if (cfg & HPET_TN_PERIODIC_CAP)
			hdev->flags |= HPET_DEV_PERI_CAP;
		hdev->num = i;

		sprintf(hdev->name, "hpet%d", i);
		if (hpet_assign_irq(hdev))
			continue;

		hdev->flags |= HPET_DEV_FSB_CAP;
		hdev->flags |= HPET_DEV_VALID;
		num_timers_used++;
		if (num_timers_used == num_possible_cpus())
			break;
	}

	printk(KERN_INFO "HPET: %d timers in total, %d timers will be used for per-cpu timer\n",
		num_timers, num_timers_used);
}

#ifdef CONFIG_HPET
static void hpet_reserve_msi_timers(struct hpet_data *hd)
{
	int i;

	if (!hpet_devs)
		return;

	for (i = 0; i < hpet_num_timers; i++) {
		struct hpet_dev *hdev = &hpet_devs[i];

		if (!(hdev->flags & HPET_DEV_VALID))
			continue;

		hd->hd_irq[hdev->num] = hdev->irq;
		hpet_reserve_timer(hd, hdev->num);
	}
}
#endif

static struct hpet_dev *hpet_get_unused_timer(void)
{
	int i;

	if (!hpet_devs)
		return NULL;

	for (i = 0; i < hpet_num_timers; i++) {
		struct hpet_dev *hdev = &hpet_devs[i];

		if (!(hdev->flags & HPET_DEV_VALID))
			continue;
		if (test_and_set_bit(HPET_DEV_USED_BIT,
			(unsigned long *)&hdev->flags))
			continue;
		return hdev;
	}
	return NULL;
}

struct hpet_work_struct {
	struct delayed_work work;
	struct completion complete;
};

static void hpet_work(struct work_struct *w)
{
	struct hpet_dev *hdev;
	int cpu = smp_processor_id();
	struct hpet_work_struct *hpet_work;

	hpet_work = container_of(w, struct hpet_work_struct, work.work);

	hdev = hpet_get_unused_timer();
	if (hdev)
		init_one_hpet_msi_clockevent(hdev, cpu);

	complete(&hpet_work->complete);
}

static int hpet_cpuhp_notify(struct notifier_block *n,
		unsigned long action, void *hcpu)
{
	unsigned long cpu = (unsigned long)hcpu;
	struct hpet_work_struct work;
	struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu);

	switch (action & 0xf) {
	case CPU_ONLINE:
		INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
		init_completion(&work.complete);
		/* FIXME: add schedule_work_on() */
		schedule_delayed_work_on(cpu, &work.work, 0);
		wait_for_completion(&work.complete);
		destroy_delayed_work_on_stack(&work.work);
		break;
	case CPU_DEAD:
		if (hdev) {
			free_irq(hdev->irq, hdev);
			hdev->flags &= ~HPET_DEV_USED;
			per_cpu(cpu_hpet_dev, cpu) = NULL;
		}
		break;
	}
	return NOTIFY_OK;
}
#else

static int hpet_setup_msi_irq(unsigned int irq)
{
	return 0;
}
static void hpet_msi_capability_lookup(unsigned int start_timer)
{
	return;
}

#ifdef CONFIG_HPET
static void hpet_reserve_msi_timers(struct hpet_data *hd)
{
	return;
}
#endif

static int hpet_cpuhp_notify(struct notifier_block *n,
		unsigned long action, void *hcpu)
{
	return NOTIFY_OK;
}

#endif

/*
 * Clock source related code
 */
static cycle_t read_hpet(struct clocksource *cs)
{
	return (cycle_t)hpet_readl(HPET_COUNTER);
}

static struct clocksource clocksource_hpet = {
	.name		= "hpet",
	.rating		= 250,
	.read		= read_hpet,
	.mask		= HPET_MASK,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume		= hpet_resume_counter,
	.archdata	= { .vclock_mode = VCLOCK_HPET },
};

static int hpet_clocksource_register(void)
{
	u64 start, now;
	cycle_t t1;

	/* Start the counter */
	hpet_restart_counter();

	/* Verify whether hpet counter works */
	t1 = hpet_readl(HPET_COUNTER);
	rdtscll(start);

	/*
	 * We don't know the TSC frequency yet, but waiting for
	 * 200000 TSC cycles is safe:
	 * 4 GHz == 50us
	 * 1 GHz == 200us
	 */
	do {
		rep_nop();
		rdtscll(now);
	} while ((now - start) < 200000UL);

	if (t1 == hpet_readl(HPET_COUNTER)) {
		printk(KERN_WARNING
		       "HPET counter not counting. HPET disabled\n");
		return -ENODEV;
	}

	clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq);
	return 0;
}

static u32 *hpet_boot_cfg;
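/*
 * Snapshot of the global config register plus one entry per comparator,
 * taken in hpet_enable() and restored by hpet_disable() so the HPET is
 * handed back in the state it was found in at boot.
 */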

/**
 * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
 */
int __init hpet_enable(void)
{
	u32 hpet_period, cfg, id;
	u64 freq;
	unsigned int i, last;

	if (!is_hpet_capable())
		return 0;

	hpet_set_mapping();

	/*
	 * Read the period and check for a sane value:
	 */
	hpet_period = hpet_readl(HPET_PERIOD);

	/*
	 * AMD SB700 based systems with spread spectrum enabled use a
	 * SMM based HPET emulation to provide proper frequency
	 * setting. The SMM code is initialized with the first HPET
	 * register access and takes some time to complete. During
	 * this time the config register reads 0xffffffff. We check
	 * for max. 1000 loops whether the config register reads a non
	 * 0xffffffff value to make sure that HPET is up and running
	 * before we go further. A counting loop is safe, as the HPET
	 * access takes thousands of CPU cycles. On non SB700 based
	 * machines this check is only done once and has no side
	 * effects.
	 */
	for (i = 0; hpet_readl(HPET_CFG) == 0xFFFFFFFF; i++) {
		if (i == 1000) {
			printk(KERN_WARNING
			       "HPET config register value = 0xFFFFFFFF. "
			       "Disabling HPET\n");
			goto out_nohpet;
		}
	}

	if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
		goto out_nohpet;

	/*
	 * The period is a femto seconds value. Convert it to a
	 * frequency.
	 */
	freq = FSEC_PER_SEC;
	do_div(freq, hpet_period);
	hpet_freq = freq;
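	/*
	 * e.g. the common 14.31818 MHz HPET reports a period of
	 * 69841279 fs, which yields hpet_freq ~= 14318180 Hz here.
	 */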

	/*
	 * Read the HPET ID register to retrieve the IRQ routing
	 * information and the number of channels
	 */
	id = hpet_readl(HPET_ID);
	hpet_print_config();

	last = (id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;

#ifdef CONFIG_HPET_EMULATE_RTC
	/*
	 * The legacy routing mode needs at least two channels, tick timer
	 * and the rtc emulation channel.
	 */
	if (!last)
		goto out_nohpet;
#endif

	cfg = hpet_readl(HPET_CFG);
	hpet_boot_cfg = kmalloc((last + 2) * sizeof(*hpet_boot_cfg),
				GFP_KERNEL);
	if (hpet_boot_cfg)
		*hpet_boot_cfg = cfg;
	else
		pr_warn("HPET initial state will not be saved\n");
	cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
	hpet_writel(cfg, HPET_CFG);
	if (cfg)
		pr_warn("HPET: Unrecognized bits %#x set in global cfg\n",
			cfg);

	for (i = 0; i <= last; ++i) {
		cfg = hpet_readl(HPET_Tn_CFG(i));
		if (hpet_boot_cfg)
			hpet_boot_cfg[i + 1] = cfg;
		cfg &= ~(HPET_TN_ENABLE | HPET_TN_LEVEL | HPET_TN_FSB);
		hpet_writel(cfg, HPET_Tn_CFG(i));
		cfg &= ~(HPET_TN_PERIODIC | HPET_TN_PERIODIC_CAP
			 | HPET_TN_64BIT_CAP | HPET_TN_32BIT | HPET_TN_ROUTE
			 | HPET_TN_FSB | HPET_TN_FSB_CAP);
		if (cfg)
			pr_warn("HPET: Unrecognized bits %#x set in cfg#%u\n",
				cfg, i);
	}
	hpet_print_config();

	if (hpet_clocksource_register())
		goto out_nohpet;

	if (id & HPET_ID_LEGSUP) {
		hpet_legacy_clockevent_register();
		return 1;
	}
	return 0;

out_nohpet:
	hpet_clear_mapping();
	hpet_address = 0;
	return 0;
}

/*
 * Needs to be late, as the reserve_timer code calls kmalloc!
 *
 * Not a problem on i386 as hpet_enable is called from late_time_init,
 * but on x86_64 it is necessary!
 */
static __init int hpet_late_init(void)
{
	int cpu;

	if (boot_hpet_disable)
		return -ENODEV;

	if (!hpet_address) {
		if (!force_hpet_address)
			return -ENODEV;

		hpet_address = force_hpet_address;
		hpet_enable();
	}

	if (!hpet_virt_address)
		return -ENODEV;

	if (hpet_readl(HPET_ID) & HPET_ID_LEGSUP)
		hpet_msi_capability_lookup(2);
	else
		hpet_msi_capability_lookup(0);

	hpet_reserve_platform_timers(hpet_readl(HPET_ID));
	hpet_print_config();

	if (hpet_msi_disable)
		return 0;

	if (boot_cpu_has(X86_FEATURE_ARAT))
		return 0;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu) {
		hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
	}

	/* This notifier should be called after workqueue is ready */
	__hotcpu_notifier(hpet_cpuhp_notify, -20);
	cpu_notifier_register_done();

	return 0;
}
fs_initcall(hpet_late_init);

void hpet_disable(void)
{
	if (is_hpet_capable() && hpet_virt_address) {
		unsigned int cfg = hpet_readl(HPET_CFG), id, last;

		if (hpet_boot_cfg)
			cfg = *hpet_boot_cfg;
		else if (hpet_legacy_int_enabled) {
			cfg &= ~HPET_CFG_LEGACY;
			hpet_legacy_int_enabled = 0;
		}
		cfg &= ~HPET_CFG_ENABLE;
		hpet_writel(cfg, HPET_CFG);

		if (!hpet_boot_cfg)
			return;

		id = hpet_readl(HPET_ID);
		last = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);

		for (id = 0; id <= last; ++id)
			hpet_writel(hpet_boot_cfg[id + 1], HPET_Tn_CFG(id));

		if (*hpet_boot_cfg & HPET_CFG_ENABLE)
			hpet_writel(*hpet_boot_cfg, HPET_CFG);
	}
}

#ifdef CONFIG_HPET_EMULATE_RTC

/* HPET in LegacyReplacement Mode eats up the RTC interrupt line. When HPET
 * is enabled, we support RTC interrupt functionality in software.
 * RTC has 3 kinds of interrupts:
 * 1) Update Interrupt - generate an interrupt, every sec, when RTC clock
 *    is updated
 * 2) Alarm Interrupt - generate an interrupt at a specific time of day
 * 3) Periodic Interrupt - generate periodic interrupt, with frequencies
 *    2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2)
 * (1) and (2) above are implemented using polling at a frequency of
 * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
 * overhead. (DEFAULT_RTC_INT_FREQ)
 * For (3), we use interrupts at 64Hz or user specified periodic
 * frequency, whichever is higher.
 */
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>
#include <asm/rtc.h>

#define DEFAULT_RTC_INT_FREQ	64
#define DEFAULT_RTC_SHIFT	6
#define RTC_NUM_INTS		1
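/*
 * DEFAULT_RTC_SHIFT is log2(DEFAULT_RTC_INT_FREQ): shifting a one second
 * clockevent delta right by 6 splits it into the 64 polling intervals
 * used for the UIE/AIE emulation below.
 */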

static unsigned long hpet_rtc_flags;
static int hpet_prev_update_sec;
static struct rtc_time hpet_alarm_time;
static unsigned long hpet_pie_count;
static u32 hpet_t1_cmp;
static u32 hpet_default_delta;
static u32 hpet_pie_delta;
static unsigned long hpet_pie_limit;

static rtc_irq_handler irq_handler;

/*
 * Check that the hpet counter c1 is ahead of the c2
 */
static inline int hpet_cnt_ahead(u32 c1, u32 c2)
{
	return (s32)(c2 - c1) < 0;
}
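/*
 * e.g. hpet_cnt_ahead(0x00000010, 0xfffffff0) is true: c2 - c1 wraps to a
 * negative s32, so c1 is treated as ahead of c2 across the 32-bit rollover.
 */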

/*
 * Registers a IRQ handler.
 */
int hpet_register_irq_handler(rtc_irq_handler handler)
{
	if (!is_hpet_enabled())
		return -ENODEV;
	if (irq_handler)
		return -EBUSY;

	irq_handler = handler;

	return 0;
}
EXPORT_SYMBOL_GPL(hpet_register_irq_handler);

/*
 * Deregisters the IRQ handler registered with hpet_register_irq_handler()
 * and does cleanup.
 */
void hpet_unregister_irq_handler(rtc_irq_handler handler)
{
	if (!is_hpet_enabled())
		return;

	irq_handler = NULL;
	hpet_rtc_flags = 0;
}
EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler);

/*
 * Timer 1 for RTC emulation. We use one shot mode, as periodic mode
 * is not supported by all HPET implementations for timer 1.
 *
 * hpet_rtc_timer_init() is called when the rtc is initialized.
 */
int hpet_rtc_timer_init(void)
{
	unsigned int cfg, cnt, delta;
	unsigned long flags;

	if (!is_hpet_enabled())
		return 0;

	if (!hpet_default_delta) {
		uint64_t clc;

		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
		clc >>= hpet_clockevent.shift + DEFAULT_RTC_SHIFT;
		hpet_default_delta = clc;
	}

	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	local_irq_save(flags);

	cnt = delta + hpet_readl(HPET_COUNTER);
	hpet_writel(cnt, HPET_T1_CMP);
	hpet_t1_cmp = cnt;

	cfg = hpet_readl(HPET_T1_CFG);
	cfg &= ~HPET_TN_PERIODIC;
	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);

	local_irq_restore(flags);

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);

static void hpet_disable_rtc_channel(void)
{
	unsigned long cfg;
	cfg = hpet_readl(HPET_T1_CFG);
	cfg &= ~HPET_TN_ENABLE;
	hpet_writel(cfg, HPET_T1_CFG);
}

/*
 * The functions below are called from rtc driver.
 * Return 0 if HPET is not being used.
 * Otherwise do the necessary changes and return 1.
 */
int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
{
	if (!is_hpet_enabled())
		return 0;

	hpet_rtc_flags &= ~bit_mask;
1120 1121 1122
	if (unlikely(!hpet_rtc_flags))
		hpet_disable_rtc_channel();

1123 1124
	return 1;
}
1125
EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
1126 1127 1128 1129 1130 1131 1132 1133 1134 1135

int hpet_set_rtc_irq_bit(unsigned long bit_mask)
{
	unsigned long oldbits = hpet_rtc_flags;

	if (!is_hpet_enabled())
		return 0;

	hpet_rtc_flags |= bit_mask;

	if ((bit_mask & RTC_UIE) && !(oldbits & RTC_UIE))
		hpet_prev_update_sec = -1;

	if (!oldbits)
		hpet_rtc_timer_init();

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_rtc_irq_bit);

int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
			unsigned char sec)
{
	if (!is_hpet_enabled())
		return 0;

	hpet_alarm_time.tm_hour = hrs;
	hpet_alarm_time.tm_min = min;
	hpet_alarm_time.tm_sec = sec;

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_alarm_time);

int hpet_set_periodic_freq(unsigned long freq)
{
	uint64_t clc;

	if (!is_hpet_enabled())
		return 0;

	if (freq <= DEFAULT_RTC_INT_FREQ)
		hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq;
	else {
		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
		do_div(clc, freq);
		clc >>= hpet_clockevent.shift;
		hpet_pie_delta = clc;
		hpet_pie_limit = 0;
	}
	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_periodic_freq);
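/*
 * Example for hpet_set_periodic_freq(): a request for 8192 Hz programs
 * hpet_pie_delta to roughly hpet_freq / 8192 cycles with hpet_pie_limit = 0,
 * while a 2 Hz request is served by the 64 Hz poll with hpet_pie_limit = 32.
 */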

int hpet_rtc_dropped_irq(void)
{
	return is_hpet_enabled();
}
EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);

static void hpet_rtc_timer_reinit(void)
{
	unsigned int delta;
	int lost_ints = -1;
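	/*
	 * lost_ints starts at -1 so the mandatory first pass through the
	 * reprogramming loop below is not counted as a lost interrupt.
	 */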

	if (unlikely(!hpet_rtc_flags))
		hpet_disable_rtc_channel();

	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	/*
	 * Increment the comparator value until we are ahead of the
	 * current count.
	 */
	do {
		hpet_t1_cmp += delta;
		hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
		lost_ints++;
	} while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER)));

	if (lost_ints) {
		if (hpet_rtc_flags & RTC_PIE)
			hpet_pie_count += lost_ints;
		if (printk_ratelimit())
			printk(KERN_WARNING "hpet1: lost %d rtc interrupts\n",
				lost_ints);
	}
}

irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
{
	struct rtc_time curr_time;
	unsigned long rtc_int_flag = 0;

	hpet_rtc_timer_reinit();
	memset(&curr_time, 0, sizeof(struct rtc_time));

	if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
		get_rtc_time(&curr_time);

	if (hpet_rtc_flags & RTC_UIE &&
	    curr_time.tm_sec != hpet_prev_update_sec) {
		if (hpet_prev_update_sec >= 0)
			rtc_int_flag = RTC_UF;
		hpet_prev_update_sec = curr_time.tm_sec;
	}

	if (hpet_rtc_flags & RTC_PIE &&
	    ++hpet_pie_count >= hpet_pie_limit) {
		rtc_int_flag |= RTC_PF;
		hpet_pie_count = 0;
	}

	if (hpet_rtc_flags & RTC_AIE &&
	    (curr_time.tm_sec == hpet_alarm_time.tm_sec) &&
	    (curr_time.tm_min == hpet_alarm_time.tm_min) &&
	    (curr_time.tm_hour == hpet_alarm_time.tm_hour))
			rtc_int_flag |= RTC_AF;

	if (rtc_int_flag) {
		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
		if (irq_handler)
			irq_handler(rtc_int_flag, dev_id);
	}
	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(hpet_rtc_interrupt);
#endif