/*
 * ACPI support for Intel Lynxpoint LPSS.
 *
 * Copyright (C) 2013, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *          Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/platform_data/clk-lpss.h>
#include <linux/platform_data/x86/pmc_atom.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
#include <linux/delay.h>

#include "internal.h"

ACPI_MODULE_NAME("acpi_lpss");

#ifdef CONFIG_X86_INTEL_LPSS

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>

#define LPSS_ADDR(desc) ((unsigned long)&desc)

#define LPSS_CLK_SIZE	0x04
#define LPSS_LTR_SIZE	0x18

/* Offsets relative to LPSS_PRIVATE_OFFSET */
#define LPSS_CLK_DIVIDER_DEF_MASK	(BIT(1) | BIT(16))
#define LPSS_RESETS			0x04
#define LPSS_RESETS_RESET_FUNC		BIT(0)
#define LPSS_RESETS_RESET_APB		BIT(1)
#define LPSS_GENERAL			0x08
#define LPSS_GENERAL_LTR_MODE_SW	BIT(2)
#define LPSS_GENERAL_UART_RTS_OVRD	BIT(3)
#define LPSS_SW_LTR			0x10
#define LPSS_AUTO_LTR			0x14
#define LPSS_LTR_SNOOP_REQ		BIT(15)
#define LPSS_LTR_SNOOP_MASK		0x0000FFFF
#define LPSS_LTR_SNOOP_LAT_1US		0x800
#define LPSS_LTR_SNOOP_LAT_32US		0xC00
#define LPSS_LTR_SNOOP_LAT_SHIFT	5
#define LPSS_LTR_SNOOP_LAT_CUTOFF	3000
#define LPSS_LTR_MAX_VAL		0x3FF
#define LPSS_TX_INT			0x20
#define LPSS_TX_INT_MASK		BIT(1)

#define LPSS_PRV_REG_COUNT		9

/* LPSS Flags */
#define LPSS_CLK			BIT(0)
#define LPSS_CLK_GATE			BIT(1)
#define LPSS_CLK_DIVIDER		BIT(2)
#define LPSS_LTR			BIT(3)
#define LPSS_SAVE_CTX			BIT(4)
#define LPSS_NO_D3_DELAY		BIT(5)

struct lpss_private_data;

struct lpss_device_desc {
	unsigned int flags;
	const char *clk_con_id;
	unsigned int prv_offset;
	size_t prv_size_override;
	struct property_entry *properties;
	void (*setup)(struct lpss_private_data *pdata);
};

static const struct lpss_device_desc lpss_dma_desc = {
	.flags = LPSS_CLK,
};

struct lpss_private_data {
	void __iomem *mmio_base;
	resource_size_t mmio_size;
	unsigned int fixed_clk_rate;
	struct clk *clk;
	const struct lpss_device_desc *dev_desc;
	u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];
};

/* LPSS run time quirks */
static unsigned int lpss_quirks;

/*
 * LPSS_QUIRK_ALWAYS_POWER_ON: override the power state of the LPSS DMA device.
 *
 * The LPSS DMA controller has neither a _PS0 nor a _PS3 method. Moreover,
 * it can be powered off automatically whenever the last LPSS device goes down.
 * With no power, any access to the DMA controller will hang the system. This
 * behaviour has been observed on some HP laptops based on Intel Bay Trail as
 * well as on the Asus T100TA transformer.
 *
 * This quirk overrides the power state of the entire LPSS island to keep the
 * DMA controller powered on whenever at least one other LPSS device is in use.
 */
#define LPSS_QUIRK_ALWAYS_POWER_ON	BIT(0)

/* UART Component Parameter Register */
#define LPSS_UART_CPR			0xF4
#define LPSS_UART_CPR_AFCE		BIT(4)

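/*
 * Set the TX interrupt mask bit in the LPSS private space and, if the UART
 * Component Parameter Register reports no auto flow control support (AFCE
 * clear), set the RTS override bit in LPSS_GENERAL.
 */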
static void lpss_uart_setup(struct lpss_private_data *pdata)
{
	unsigned int offset;
	u32 val;

	offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
	val = readl(pdata->mmio_base + offset);
	writel(val | LPSS_TX_INT_MASK, pdata->mmio_base + offset);

	val = readl(pdata->mmio_base + LPSS_UART_CPR);
	if (!(val & LPSS_UART_CPR_AFCE)) {
		offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
		val = readl(pdata->mmio_base + offset);
		val |= LPSS_GENERAL_UART_RTS_OVRD;
		writel(val, pdata->mmio_base + offset);
	}
}

static void lpss_deassert_reset(struct lpss_private_data *pdata)
{
	unsigned int offset;
	u32 val;

	offset = pdata->dev_desc->prv_offset + LPSS_RESETS;
	val = readl(pdata->mmio_base + offset);
	val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
	writel(val, pdata->mmio_base + offset);
}

#define LPSS_I2C_ENABLE			0x6c

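/*
 * Bay Trail I2C setup: deassert the LPSS resets, record the fixed 133 MHz
 * rate when the private clock-parameter register is already programmed
 * (presumably by the firmware) so that register_device_clock() registers a
 * fixed-rate clock instead of the gate/divider chain, and finally make sure
 * the I2C host starts out disabled.
 */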
static void byt_i2c_setup(struct lpss_private_data *pdata)
{
	lpss_deassert_reset(pdata);

	if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
		pdata->fixed_clk_rate = 133000000;

	writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
}

/* BSW PWM used for backlight control by the i915 driver */
static struct pwm_lookup bsw_pwm_lookup[] = {
	PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0",
			       "pwm_backlight", 0, PWM_POLARITY_NORMAL,
			       "pwm-lpss-platform"),
};

static void bsw_pwm_setup(struct lpss_private_data *pdata)
{
	pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
}

static const struct lpss_device_desc lpt_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
	.prv_offset = 0x800,
};

static const struct lpss_device_desc lpt_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
	.prv_offset = 0x800,
};

static struct property_entry uart_properties[] = {
	PROPERTY_ENTRY_U32("reg-io-width", 4),
	PROPERTY_ENTRY_U32("reg-shift", 2),
	PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"),
	{ },
};

static const struct lpss_device_desc lpt_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc lpt_sdio_dev_desc = {
	.flags = LPSS_LTR,
	.prv_offset = 0x1000,
	.prv_size_override = 0x1018,
};

static const struct lpss_device_desc byt_pwm_dev_desc = {
	.flags = LPSS_SAVE_CTX,
};

static const struct lpss_device_desc bsw_pwm_dev_desc = {
	.flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
	.setup = bsw_pwm_setup,
};

static const struct lpss_device_desc byt_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc bsw_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
			| LPSS_NO_D3_DELAY,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc byt_spi_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
	.prv_offset = 0x400,
};

static const struct lpss_device_desc byt_sdio_dev_desc = {
	.flags = LPSS_CLK,
};

static const struct lpss_device_desc byt_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_SAVE_CTX,
	.prv_offset = 0x800,
	.setup = byt_i2c_setup,
};

static const struct lpss_device_desc bsw_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
	.prv_offset = 0x800,
	.setup = byt_i2c_setup,
};

static const struct lpss_device_desc bsw_spi_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
			| LPSS_NO_D3_DELAY,
	.prv_offset = 0x400,
	.setup = lpss_deassert_reset,
};

#define ICPU(model)	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }

static const struct x86_cpu_id lpss_cpu_ids[] = {
	ICPU(INTEL_FAM6_ATOM_SILVERMONT1),	/* Valleyview, Bay Trail */
	ICPU(INTEL_FAM6_ATOM_AIRMONT),	/* Braswell, Cherry Trail */
	{}
};

#else

#define LPSS_ADDR(desc) (0UL)

#endif /* CONFIG_X86_INTEL_LPSS */

static const struct acpi_device_id acpi_lpss_device_ids[] = {
	/* Generic LPSS devices */
	{ "INTL9C60", LPSS_ADDR(lpss_dma_desc) },

	/* Lynxpoint LPSS devices */
	{ "INT33C0", LPSS_ADDR(lpt_dev_desc) },
	{ "INT33C1", LPSS_ADDR(lpt_dev_desc) },
	{ "INT33C2", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT33C3", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT33C4", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT33C5", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT33C6", LPSS_ADDR(lpt_sdio_dev_desc) },
	{ "INT33C7", },

	/* BayTrail LPSS devices */
	{ "80860F09", LPSS_ADDR(byt_pwm_dev_desc) },
	{ "80860F0A", LPSS_ADDR(byt_uart_dev_desc) },
	{ "80860F0E", LPSS_ADDR(byt_spi_dev_desc) },
	{ "80860F14", LPSS_ADDR(byt_sdio_dev_desc) },
	{ "80860F41", LPSS_ADDR(byt_i2c_dev_desc) },
	{ "INT33B2", },
	{ "INT33FC", },

	/* Braswell LPSS devices */
	{ "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
	{ "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
	{ "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
	{ "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },

	/* Broadwell LPSS devices */
	{ "INT3430", LPSS_ADDR(lpt_dev_desc) },
	{ "INT3431", LPSS_ADDR(lpt_dev_desc) },
	{ "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT3433", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT3434", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT3435", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },
	{ "INT3437", },

	/* Wildcat Point LPSS devices */
	{ "INT3438", LPSS_ADDR(lpt_dev_desc) },

	{ }
};

#ifdef CONFIG_X86_INTEL_LPSS

static int is_memory(struct acpi_resource *res, void *not_used)
{
	struct resource r;
	return !acpi_dev_resource_memory(res, &r);
}

/* LPSS main clock device. */
static struct platform_device *lpss_clk_dev;

static inline void lpt_register_clock_device(void)
{
	lpss_clk_dev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
}

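/*
 * Build the per-device clock chain on top of the shared LPT root clock:
 * either a single fixed-rate clock when ->setup() has filled in
 * pdata->fixed_clk_rate, or an optional gate ("<devname>") followed by a
 * fractional divider ("<devname>-div") and an update gate
 * ("<devname>-update"), depending on the LPSS_CLK_GATE and LPSS_CLK_DIVIDER
 * flags. A clkdev lookup (con_id = dev_desc->clk_con_id) is registered so
 * that the platform driver can look the resulting clock up by device name.
 */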
static int register_device_clock(struct acpi_device *adev,
				 struct lpss_private_data *pdata)
{
	const struct lpss_device_desc *dev_desc = pdata->dev_desc;
	const char *devname = dev_name(&adev->dev);
	struct clk *clk = ERR_PTR(-ENODEV);
	struct lpss_clk_data *clk_data;
	const char *parent, *clk_name;
	void __iomem *prv_base;

	if (!lpss_clk_dev)
		lpt_register_clock_device();

	clk_data = platform_get_drvdata(lpss_clk_dev);
	if (!clk_data)
		return -ENODEV;
	clk = clk_data->clk;

	if (!pdata->mmio_base
	    || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
		return -ENODATA;

	parent = clk_data->name;
	prv_base = pdata->mmio_base + dev_desc->prv_offset;

	if (pdata->fixed_clk_rate) {
		clk = clk_register_fixed_rate(NULL, devname, parent, 0,
					      pdata->fixed_clk_rate);
		goto out;
	}

	if (dev_desc->flags & LPSS_CLK_GATE) {
		clk = clk_register_gate(NULL, devname, parent, 0,
					prv_base, 0, 0, NULL);
		parent = devname;
	}

	if (dev_desc->flags & LPSS_CLK_DIVIDER) {
		/* Prevent division by zero */
		if (!readl(prv_base))
			writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base);

		clk_name = kasprintf(GFP_KERNEL, "%s-div", devname);
		if (!clk_name)
			return -ENOMEM;
		clk = clk_register_fractional_divider(NULL, clk_name, parent,
						      0, prv_base,
						      1, 15, 16, 15, 0, NULL);
		parent = clk_name;

		clk_name = kasprintf(GFP_KERNEL, "%s-update", devname);
		if (!clk_name) {
			kfree(parent);
			return -ENOMEM;
		}
		clk = clk_register_gate(NULL, clk_name, parent,
					CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
					prv_base, 31, 0, NULL);
		kfree(parent);
		kfree(clk_name);
	}
out:
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	pdata->clk = clk;
	clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
	return 0;
}

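/*
 * Scan handler ->attach() callback: allocate the private data, map the
 * device's first memory resource (honoring prv_size_override), run the
 * optional per-device ->setup() hook, register the device clock when
 * LPSS_CLK is set, fix up the ACPI power state and finally create the
 * platform device carrying the descriptor's extra properties. Returning 1
 * tells the ACPI scan core that the device has been claimed.
 */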
static int acpi_lpss_create_device(struct acpi_device *adev,
				   const struct acpi_device_id *id)
{
	const struct lpss_device_desc *dev_desc;
	struct lpss_private_data *pdata;
	struct resource_entry *rentry;
	struct list_head resource_list;
	struct platform_device *pdev;
	int ret;

	dev_desc = (const struct lpss_device_desc *)id->driver_data;
	if (!dev_desc) {
		pdev = acpi_create_platform_device(adev, NULL);
		return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
	}
	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL);
	if (ret < 0)
		goto err_out;

	list_for_each_entry(rentry, &resource_list, node)
		if (resource_type(rentry->res) == IORESOURCE_MEM) {
			if (dev_desc->prv_size_override)
				pdata->mmio_size = dev_desc->prv_size_override;
			else
				pdata->mmio_size = resource_size(rentry->res);
			pdata->mmio_base = ioremap(rentry->res->start,
						   pdata->mmio_size);
			break;
		}

	acpi_dev_free_resource_list(&resource_list);

	if (!pdata->mmio_base) {
		ret = -ENOMEM;
		goto err_out;
	}

	pdata->dev_desc = dev_desc;

	if (dev_desc->setup)
		dev_desc->setup(pdata);

	if (dev_desc->flags & LPSS_CLK) {
		ret = register_device_clock(adev, pdata);
		if (ret) {
			/* Skip the device, but continue the namespace scan. */
			ret = 0;
			goto err_out;
		}
	}

	/*
	 * This works around a known issue in ACPI tables where LPSS devices
	 * have _PS0 and _PS3 without _PSC (and no power resources), so
	 * acpi_bus_init_power() will assume that the BIOS has put them into D0.
	 */
	ret = acpi_device_fix_up_power(adev);
	if (ret) {
		/* Skip the device, but continue the namespace scan. */
		ret = 0;
		goto err_out;
	}

	adev->driver_data = pdata;
	pdev = acpi_create_platform_device(adev, dev_desc->properties);
	if (!IS_ERR_OR_NULL(pdev)) {
		return 1;
	}

	ret = PTR_ERR(pdev);
	adev->driver_data = NULL;

 err_out:
	kfree(pdata);
	return ret;
}

static u32 __lpss_reg_read(struct lpss_private_data *pdata, unsigned int reg)
{
	return readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}

static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata,
			     unsigned int reg)
{
	writel(val, pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}

static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
{
	struct acpi_device *adev;
	struct lpss_private_data *pdata;
	unsigned long flags;
	int ret;

	ret = acpi_bus_get_device(ACPI_HANDLE(dev), &adev);
	if (WARN_ON(ret))
		return ret;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (pm_runtime_suspended(dev)) {
		ret = -EAGAIN;
		goto out;
	}
	pdata = acpi_driver_data(adev);
	if (WARN_ON(!pdata || !pdata->mmio_base)) {
		ret = -ENODEV;
		goto out;
	}
	*val = __lpss_reg_read(pdata, reg);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);
	return ret;
}

static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	u32 ltr_value = 0;
	unsigned int reg;
	int ret;

	reg = strcmp(attr->attr.name, "auto_ltr") ? LPSS_SW_LTR : LPSS_AUTO_LTR;
	ret = lpss_reg_read(dev, reg, &ltr_value);
	if (ret)
		return ret;

	return snprintf(buf, PAGE_SIZE, "%08x\n", ltr_value);
}

static ssize_t lpss_ltr_mode_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	u32 ltr_mode = 0;
	char *outstr;
	int ret;

	ret = lpss_reg_read(dev, LPSS_GENERAL, &ltr_mode);
	if (ret)
		return ret;

	outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ? "sw" : "auto";
	return sprintf(buf, "%s\n", outstr);
}

static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL);

static struct attribute *lpss_attrs[] = {
	&dev_attr_auto_ltr.attr,
	&dev_attr_sw_ltr.attr,
	&dev_attr_ltr_mode.attr,
	NULL,
};

static struct attribute_group lpss_attr_group = {
	.attrs = lpss_attrs,
	.name = "lpss_ltr",
};

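/*
 * Translate a latency tolerance value (in microseconds) into the SW LTR
 * register encoding. As the code below reads: a negative value switches the
 * device back to automatic LTR; values up to LPSS_LTR_MAX_VAL are reported
 * with 1 us granularity (e.g. 100 -> 100 @ 1 us, requirement bit set);
 * larger values are shifted down to 32 us granularity (e.g. 2048 -> 64 @
 * 32 us); values at or above the 3000 us cutoff saturate to the maximum
 * with the snoop requirement bit left clear.
 */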
static void acpi_lpss_set_ltr(struct device *dev, s32 val)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	u32 ltr_mode, ltr_val;

	ltr_mode = __lpss_reg_read(pdata, LPSS_GENERAL);
	if (val < 0) {
		if (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) {
			ltr_mode &= ~LPSS_GENERAL_LTR_MODE_SW;
			__lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
		}
		return;
	}
	ltr_val = __lpss_reg_read(pdata, LPSS_SW_LTR) & ~LPSS_LTR_SNOOP_MASK;
	if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) {
		ltr_val |= LPSS_LTR_SNOOP_LAT_32US;
		val = LPSS_LTR_MAX_VAL;
	} else if (val > LPSS_LTR_MAX_VAL) {
		ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ;
		val >>= LPSS_LTR_SNOOP_LAT_SHIFT;
	} else {
		ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ;
	}
	ltr_val |= val;
	__lpss_reg_write(ltr_val, pdata, LPSS_SW_LTR);
	if (!(ltr_mode & LPSS_GENERAL_LTR_MODE_SW)) {
		ltr_mode |= LPSS_GENERAL_LTR_MODE_SW;
		__lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
	}
}

#ifdef CONFIG_PM
/**
 * acpi_lpss_save_ctx() - Save the private registers of LPSS device
 * @dev: LPSS device
 * @pdata: pointer to the private data of the LPSS device
 *
 * Most LPSS devices have private registers which may lose their context when
 * the device is powered down. acpi_lpss_save_ctx() saves those registers into
 * prv_reg_ctx array.
 */
static void acpi_lpss_save_ctx(struct device *dev,
			       struct lpss_private_data *pdata)
{
	unsigned int i;

	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
		unsigned long offset = i * sizeof(u32);

		pdata->prv_reg_ctx[i] = __lpss_reg_read(pdata, offset);
		dev_dbg(dev, "saving 0x%08x from LPSS reg at offset 0x%02lx\n",
			pdata->prv_reg_ctx[i], offset);
	}
}

/**
 * acpi_lpss_restore_ctx() - Restore the private registers of LPSS device
 * @dev: LPSS device
 * @pdata: pointer to the private data of the LPSS device
 *
 * Restores the registers that were previously stored with acpi_lpss_save_ctx().
 */
static void acpi_lpss_restore_ctx(struct device *dev,
				  struct lpss_private_data *pdata)
{
	unsigned int i;

	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
		unsigned long offset = i * sizeof(u32);

		__lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset);
		dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n",
			pdata->prv_reg_ctx[i], offset);
	}
}

static void acpi_lpss_d3_to_d0_delay(struct lpss_private_data *pdata)
{
	/*
	 * The following delay is needed or the subsequent write operations may
	 * fail. The LPSS devices are actually PCI devices and the PCI spec
	 * expects a 10 ms delay before the device can be accessed again after
	 * a D3 to D0 transition. However, some platforms, such as BSW, do not
	 * need this delay.
	 */
	unsigned int delay = 10;	/* default 10ms delay */

	if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY)
		delay = 0;

	msleep(delay);
}

static int acpi_lpss_activate(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = acpi_dev_runtime_resume(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	/*
	 * This is called only at the ->probe() stage, where a device is either
	 * in a known state defined by the BIOS or, most likely, powered off.
	 * Because of this we have to deassert the reset line to make sure that
	 * ->probe() will recognize the device.
	 */
	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		lpss_deassert_reset(pdata);

	return 0;
}

static void acpi_lpss_dismiss(struct device *dev)
{
	acpi_dev_runtime_suspend(dev);
}

#ifdef CONFIG_PM_SLEEP
static int acpi_lpss_suspend_late(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = pm_generic_suspend_late(dev);
	if (ret)
		return ret;

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_save_ctx(dev, pdata);

	return acpi_dev_suspend_late(dev);
}

static int acpi_lpss_resume_early(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = acpi_dev_resume_early(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_restore_ctx(dev, pdata);

	return pm_generic_resume_early(dev);
}
#endif /* CONFIG_PM_SLEEP */

/* IOSF SB for LPSS island */
#define LPSS_IOSF_UNIT_LPIOEP		0xA0
#define LPSS_IOSF_UNIT_LPIO1		0xAB
#define LPSS_IOSF_UNIT_LPIO2		0xAC

#define LPSS_IOSF_PMCSR			0x84
#define LPSS_PMCSR_D0			0
#define LPSS_PMCSR_D3hot		3
#define LPSS_PMCSR_Dx_MASK		GENMASK(1, 0)

#define LPSS_IOSF_GPIODEF0		0x154
#define LPSS_GPIODEF0_DMA1_D3		BIT(2)
#define LPSS_GPIODEF0_DMA2_D3		BIT(3)
#define LPSS_GPIODEF0_DMA_D3_MASK	GENMASK(3, 2)
#define LPSS_GPIODEF0_DMA_LLP		BIT(13)

static DEFINE_MUTEX(lpss_iosf_mutex);

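/*
 * LPSS_QUIRK_ALWAYS_POWER_ON handling: when the quirk is set and IOSF MBI
 * access is available, runtime PM additionally manages the LPSS island over
 * the IOSF sideband. lpss_iosf_enter_d3_state() is called last from
 * acpi_lpss_runtime_suspend() and powers down the two LPSS DMA controllers
 * only once the PMC reports that all other LPSS devices are already in
 * D3hot; lpss_iosf_exit_d3_state() undoes this first thing on runtime
 * resume.
 */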
static void lpss_iosf_enter_d3_state(void)
{
	u32 value1 = 0;
	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
	u32 value2 = LPSS_PMCSR_D3hot;
	u32 mask2 = LPSS_PMCSR_Dx_MASK;
	/*
	 * The PMC provides information about the actual state of the LPSS
	 * devices. Here we read the values related to the LPSS power island,
	 * i.e. the LPSS devices, excluding both LPSS DMA controllers, along
	 * with the SCC domain.
	 */
	u32 func_dis, d3_sts_0, pmc_status, pmc_mask = 0xfe000ffe;
	int ret;

	ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis);
	if (ret)
		return;

	mutex_lock(&lpss_iosf_mutex);

	ret = pmc_atom_read(PMC_D3_STS_0, &d3_sts_0);
	if (ret)
		goto exit;

	/*
	 * Get the status of the entire LPSS power island on a per-device basis.
	 * Shut down both LPSS DMA controllers if and only if all other devices
	 * are already in D3hot.
	 */
	pmc_status = (~(d3_sts_0 | func_dis)) & pmc_mask;
	if (pmc_status)
		goto exit;

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
			LPSS_IOSF_GPIODEF0, value1, mask1);
exit:
	mutex_unlock(&lpss_iosf_mutex);
}

static void lpss_iosf_exit_d3_state(void)
{
	u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3 |
		     LPSS_GPIODEF0_DMA_LLP;
	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
	u32 value2 = LPSS_PMCSR_D0;
	u32 mask2 = LPSS_PMCSR_Dx_MASK;

	mutex_lock(&lpss_iosf_mutex);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
			LPSS_IOSF_GPIODEF0, value1, mask1);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	mutex_unlock(&lpss_iosf_mutex);
}

static int acpi_lpss_runtime_suspend(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = pm_generic_runtime_suspend(dev);
	if (ret)
		return ret;

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_save_ctx(dev, pdata);

	ret = acpi_dev_runtime_suspend(dev);

	/*
	 * This call must be last in the sequence, otherwise the PMC will report
	 * a wrong status for devices that are about to be powered off. See
	 * lpss_iosf_enter_d3_state() for further information.
	 */
	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
		lpss_iosf_enter_d3_state();

	return ret;
}

static int acpi_lpss_runtime_resume(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	/*
	 * This call is kept first, in symmetry with the
	 * acpi_lpss_runtime_suspend() sequence.
	 */
	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
		lpss_iosf_exit_d3_state();

	ret = acpi_dev_runtime_resume(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_restore_ctx(dev, pdata);

	return pm_generic_runtime_resume(dev);
}
#endif /* CONFIG_PM */

static struct dev_pm_domain acpi_lpss_pm_domain = {
#ifdef CONFIG_PM
	.activate = acpi_lpss_activate,
	.dismiss = acpi_lpss_dismiss,
#endif
	.ops = {
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
		.prepare = acpi_subsys_prepare,
		.complete = pm_complete_with_resume_check,
		.suspend = acpi_subsys_suspend,
		.suspend_late = acpi_lpss_suspend_late,
		.resume_early = acpi_lpss_resume_early,
		.freeze = acpi_subsys_freeze,
		.poweroff = acpi_subsys_suspend,
		.poweroff_late = acpi_lpss_suspend_late,
		.restore_early = acpi_lpss_resume_early,
#endif
		.runtime_suspend = acpi_lpss_runtime_suspend,
		.runtime_resume = acpi_lpss_runtime_resume,
#endif
	},
};

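/*
 * The PM domain above is attached to LPSS platform devices from a platform
 * bus notifier: it is set when a driver is about to bind (and on device
 * addition) and cleared again on unbind or removal. Devices with the
 * LPSS_LTR flag additionally get the "lpss_ltr" sysfs group created at
 * BUS_NOTIFY_ADD_DEVICE time and removed at BUS_NOTIFY_DEL_DEVICE time.
 */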
static int acpi_lpss_platform_notify(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct platform_device *pdev = to_platform_device(data);
	struct lpss_private_data *pdata;
	struct acpi_device *adev;
	const struct acpi_device_id *id;

	id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev);
	if (!id || !id->driver_data)
		return 0;

	if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
		return 0;

	pdata = acpi_driver_data(adev);
	if (!pdata)
		return 0;

	if (pdata->mmio_base &&
	    pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) {
		dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n");
		return 0;
	}

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
		break;
	case BUS_NOTIFY_DRIVER_NOT_BOUND:
	case BUS_NOTIFY_UNBOUND_DRIVER:
		dev_pm_domain_set(&pdev->dev, NULL);
		break;
	case BUS_NOTIFY_ADD_DEVICE:
		dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
		if (pdata->dev_desc->flags & LPSS_LTR)
			return sysfs_create_group(&pdev->dev.kobj,
						  &lpss_attr_group);
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		if (pdata->dev_desc->flags & LPSS_LTR)
			sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
		dev_pm_domain_set(&pdev->dev, NULL);
		break;
	default:
		break;
	}

	return 0;
}

static struct notifier_block acpi_lpss_nb = {
	.notifier_call = acpi_lpss_platform_notify,
};

static void acpi_lpss_bind(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR))
		return;

	if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE)
		dev->power.set_latency_tolerance = acpi_lpss_set_ltr;
	else
		dev_err(dev, "MMIO size insufficient to access LTR\n");
}

static void acpi_lpss_unbind(struct device *dev)
{
	dev->power.set_latency_tolerance = NULL;
}

static struct acpi_scan_handler lpss_handler = {
	.ids = acpi_lpss_device_ids,
	.attach = acpi_lpss_create_device,
	.bind = acpi_lpss_bind,
	.unbind = acpi_lpss_unbind,
};

void __init acpi_lpss_init(void)
{
	const struct x86_cpu_id *id;
	int ret;

	ret = lpt_clk_init();
	if (ret)
		return;

	id = x86_match_cpu(lpss_cpu_ids);
	if (id)
		lpss_quirks |= LPSS_QUIRK_ALWAYS_POWER_ON;

	bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
	acpi_scan_add_handler(&lpss_handler);
}

#else

static struct acpi_scan_handler lpss_handler = {
	.ids = acpi_lpss_device_ids,
};

void __init acpi_lpss_init(void)
{
	acpi_scan_add_handler(&lpss_handler);
}

#endif /* CONFIG_X86_INTEL_LPSS */