#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/wait.h>

#include "pci.h"

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */

DEFINE_RAW_SPINLOCK(pci_lock);

/*
 *  Wrappers for all PCI configuration access functions.  They just check
 *  alignment, do locking and call the low-level functions pointed to
 *  by pci_bus->ops.
 */

#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)

#define PCI_OP_READ(size, type, len) \
int pci_bus_read_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type *value)	\
{									\
	int res;							\
	unsigned long flags;						\
	u32 data = 0;							\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	raw_spin_lock_irqsave(&pci_lock, flags);			\
	res = bus->ops->read(bus, devfn, pos, len, &data);		\
	*value = (type)data;						\
	raw_spin_unlock_irqrestore(&pci_lock, flags);		\
	return res;							\
}

#define PCI_OP_WRITE(size, type, len) \
int pci_bus_write_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type value)	\
{									\
	int res;							\
	unsigned long flags;						\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	raw_spin_lock_irqsave(&pci_lock, flags);			\
	res = bus->ops->write(bus, devfn, pos, len, value);		\
	raw_spin_unlock_irqrestore(&pci_lock, flags);		\
	return res;							\
}

PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
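
/*
 * Usage sketch (illustrative, not part of the original file): probing for
 * a function before a struct pci_dev exists, as a host controller driver
 * might.  "bus" and "devfn" are assumed to be supplied by the caller:
 *
 *	u16 vendor;
 *
 *	if (pci_bus_read_config_word(bus, devfn, PCI_VENDOR_ID, &vendor)
 *	    == PCIBIOS_SUCCESSFUL && vendor != 0xffff)
 *		pr_info("found device %02x.%d\n",
 *			PCI_SLOT(devfn), PCI_FUNC(devfn));
 */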

int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 *val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where);
	if (!addr) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (size == 1)
		*val = readb(addr);
	else if (size == 2)
		*val = readw(addr);
	else
		*val = readl(addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read);

int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 1)
		writeb(val, addr);
	else if (size == 2)
		writew(val, addr);
	else
		writel(val, addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write);

int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
			      int where, int size, u32 *val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
	if (!addr) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	*val = readl(addr);

	if (size <= 2)
		*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read32);

int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 val)
{
	void __iomem *addr;
	u32 mask, tmp;

	addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 4) {
		writel(val, addr);
		return PCIBIOS_SUCCESSFUL;
	} else {
		mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
	}

	tmp = readl(addr) & mask;
	tmp |= val << ((where & 0x3) * 8);
	writel(tmp, addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write32);
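
/*
 * Worked example (illustrative): a 1-byte read at where == 0x06 through
 * pci_generic_config_read32() above fetches the aligned dword at 0x04 and
 * extracts the byte:
 *
 *	*val = (readl(addr) >> (8 * (0x06 & 3))) & ((1 << 8) - 1);
 *
 * i.e. bits 23:16 of that dword.  pci_generic_config_write32() mirrors
 * this with a read-modify-write of the containing dword, so sub-dword
 * writes through it are not atomic with respect to neighbouring fields.
 */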

/**
 * pci_bus_set_ops - Set raw operations of pci bus
 * @bus:	pci bus struct
 * @ops:	new raw operations
 *
 * Return previous raw operations
 */
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
{
	struct pci_ops *old_ops;
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);
	old_ops = bus->ops;
	bus->ops = ops;
	raw_spin_unlock_irqrestore(&pci_lock, flags);
	return old_ops;
}
EXPORT_SYMBOL(pci_bus_set_ops);
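
/*
 * Usage sketch (illustrative): an interposer such as an error-injection
 * layer can swap in its own accessors and restore the originals later.
 * "my_pci_ops" is a hypothetical struct pci_ops defined by that caller:
 *
 *	struct pci_ops *orig = pci_bus_set_ops(bus, &my_pci_ops);
 *	...
 *	pci_bus_set_ops(bus, orig);
 */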

/**
 * pci_read_vpd - Read one entry from Vital Product Data
 * @dev:	pci device struct
 * @pos:	offset in vpd space
 * @count:	number of bytes to read
 * @buf:	pointer to where to store result
 *
 */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -ENODEV;
	return dev->vpd->ops->read(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_read_vpd);

/**
 * pci_write_vpd - Write entry to Vital Product Data
 * @dev:	pci device struct
 * @pos:	offset in vpd space
 * @count:	number of bytes to write
 * @buf:	buffer containing write data
 *
 */
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -ENODEV;
	return dev->vpd->ops->write(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_write_vpd);
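
/*
 * Usage sketch (illustrative): a driver inspecting the start of its
 * device's VPD.  pci_read_vpd() returns the number of bytes read or a
 * negative errno; the buffer and sizes here are the caller's choice:
 *
 *	u8 vpd[64];
 *	ssize_t len = pci_read_vpd(pdev, 0, sizeof(vpd), vpd);
 *
 *	if (len > 0 && vpd[0] == PCI_VPD_LRDT_ID_STRING)
 *		... the identifier string tag is present ...
 */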

/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so.  Some devices require this during BIST and
 * we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);

static noinline void pci_wait_cfg(struct pci_dev *dev)
{
	DECLARE_WAITQUEUE(wait, current);

	__add_wait_queue(&pci_cfg_wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		raw_spin_unlock_irq(&pci_lock);
		schedule();
		raw_spin_lock_irq(&pci_lock);
	} while (dev->block_cfg_access);
	__remove_wait_queue(&pci_cfg_wait, &wait);
}

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_READ_CONFIG(size, type)					\
int pci_user_read_config_##size						\
	(struct pci_dev *dev, int pos, type *val)			\
{									\
	int ret = PCIBIOS_SUCCESSFUL;					\
	u32 data = -1;							\
	if (PCI_##size##_BAD)						\
		return -EINVAL;						\
	raw_spin_lock_irq(&pci_lock);				\
	if (unlikely(dev->block_cfg_access))				\
		pci_wait_cfg(dev);					\
	ret = dev->bus->ops->read(dev->bus, dev->devfn,			\
					pos, sizeof(type), &data);	\
	raw_spin_unlock_irq(&pci_lock);				\
	*val = (type)data;						\
	return pcibios_err_to_errno(ret);				\
}									\
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_WRITE_CONFIG(size, type)				\
int pci_user_write_config_##size					\
	(struct pci_dev *dev, int pos, type val)			\
{									\
	int ret = PCIBIOS_SUCCESSFUL;					\
	if (PCI_##size##_BAD)						\
		return -EINVAL;						\
	raw_spin_lock_irq(&pci_lock);				\
	if (unlikely(dev->block_cfg_access))				\
		pci_wait_cfg(dev);					\
	ret = dev->bus->ops->write(dev->bus, dev->devfn,		\
					pos, sizeof(type), val);	\
	raw_spin_unlock_irq(&pci_lock);				\
	return pcibios_err_to_errno(ret);				\
}									\
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);

PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)
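
/*
 * Usage sketch (illustrative): userspace-facing paths (sysfs, /proc) are
 * expected to use these helpers so they honour the config-access blocking
 * below; note the return value is already a negative errno rather than a
 * PCIBIOS_* code:
 *
 *	u16 word;
 *	int err = pci_user_read_config_word(dev, pos, &word);
 *
 *	if (err)
 *		return err;
 */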

/* VPD access through PCI 2.2+ VPD capability */

#define PCI_VPD_PCI22_SIZE (PCI_VPD_ADDR_MASK + 1)

struct pci_vpd_pci22 {
	struct pci_vpd base;
	struct mutex lock;
	u16	flag;
	u8	cap;
	u8	busy:1;
	u8	valid:1;
};

/**
 * pci_vpd_pci22_size - determine actual size of Vital Product Data
 * @dev:	pci device struct
 * @old_size:	current assumed size, also maximum allowed size
 */
static size_t pci_vpd_pci22_size(struct pci_dev *dev, size_t old_size)
{
	size_t off = 0;
	unsigned char header[1+2];	/* 1 byte tag, 2 bytes length */

	while (off < old_size &&
	       pci_read_vpd(dev, off, 1, header) == 1) {
		unsigned char tag;

		if (header[0] & PCI_VPD_LRDT) {
			/* Large Resource Data Type Tag */
			tag = pci_vpd_lrdt_tag(header);
			/* Only read length from known tag items */
			if ((tag == PCI_VPD_LTIN_ID_STRING) ||
			    (tag == PCI_VPD_LTIN_RO_DATA) ||
			    (tag == PCI_VPD_LTIN_RW_DATA)) {
				if (pci_read_vpd(dev, off+1, 2,
						 &header[1]) != 2) {
					dev_warn(&dev->dev,
						 "invalid large VPD tag %02x size at offset %zu",
						 tag, off + 1);
					return 0;
				}
				off += PCI_VPD_LRDT_TAG_SIZE +
					pci_vpd_lrdt_size(header);
			}
		} else {
			/* Short Resource Data Type Tag */
			off += PCI_VPD_SRDT_TAG_SIZE +
				pci_vpd_srdt_size(header);
			tag = pci_vpd_srdt_tag(header);
		}

		if (tag == PCI_VPD_STIN_END)	/* End tag descriptor */
			return off;

		if ((tag != PCI_VPD_LTIN_ID_STRING) &&
		    (tag != PCI_VPD_LTIN_RO_DATA) &&
		    (tag != PCI_VPD_LTIN_RW_DATA)) {
			dev_warn(&dev->dev,
				 "invalid %s VPD tag %02x at offset %zu",
				 (header[0] & PCI_VPD_LRDT) ? "large" : "short",
				 tag, off);
			return 0;
		}
	}
	return 0;
}
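
/*
 * Illustrative layout accepted by the walker above (a typical example,
 * not taken from any specific device):
 *
 *	0x82 len_lo len_hi ...	large resource, ID string (tag 0x02)
 *	0x90 len_lo len_hi ...	large resource, read-only data (tag 0x10)
 *	0x91 len_lo len_hi ...	large resource, read/write data (tag 0x11)
 *	0x78			small resource, end tag (tag 0x0f)
 *
 * The walk returns the accumulated offset at the end tag; any unknown tag
 * makes it treat the whole VPD as invalid and return 0.
 */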

/*
 * Wait for last operation to complete.
 * This code has to spin since there is no other notification from the PCI
 * hardware. Since the VPD is often implemented by serial attachment to an
 * EEPROM, it may take many milliseconds to complete.
 *
 * Returns 0 on success, negative values indicate error.
 */
static int pci_vpd_pci22_wait(struct pci_dev *dev)
{
	struct pci_vpd_pci22 *vpd =
		container_of(dev->vpd, struct pci_vpd_pci22, base);
	unsigned long timeout = jiffies + HZ/20 + 2;
	u16 status;
	int ret;

	if (!vpd->busy)
		return 0;

	for (;;) {
		ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						&status);
		if (ret < 0)
			return ret;

		if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
			vpd->busy = 0;
			return 0;
		}

		if (time_after(jiffies, timeout)) {
			dev_printk(KERN_DEBUG, &dev->dev, "vpd r/w failed.  This is likely a firmware bug on this device.  Contact the card vendor for a firmware update\n");
			return -ETIMEDOUT;
		}
		if (fatal_signal_pending(current))
			return -EINTR;
		if (!cond_resched())
			udelay(10);
	}
}

static ssize_t pci_vpd_pci22_read(struct pci_dev *dev, loff_t pos, size_t count,
				  void *arg)
{
	struct pci_vpd_pci22 *vpd =
		container_of(dev->vpd, struct pci_vpd_pci22, base);
	int ret;
	loff_t end = pos + count;
	u8 *buf = arg;

	if (pos < 0)
		return -EINVAL;

	if (!vpd->valid) {
		vpd->valid = 1;
		vpd->base.len = pci_vpd_pci22_size(dev, vpd->base.len);
	}

	if (vpd->base.len == 0)
		return -EIO;

	if (pos >= vpd->base.len)
		return 0;

	if (end > vpd->base.len) {
		end = vpd->base.len;
		count = end - pos;
	}

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	ret = pci_vpd_pci22_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;
		unsigned int i, skip;

		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos & ~3);
		if (ret < 0)
			break;
		vpd->busy = 1;
		vpd->flag = PCI_VPD_ADDR_F;
		ret = pci_vpd_pci22_wait(dev);
		if (ret < 0)
			break;

		ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
		if (ret < 0)
			break;

		skip = pos & 3;
		for (i = 0;  i < sizeof(u32); i++) {
			if (i >= skip) {
				*buf++ = val;
				if (++pos == end)
					break;
			}
			val >>= 8;
		}
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}

static ssize_t pci_vpd_pci22_write(struct pci_dev *dev, loff_t pos, size_t count,
				   const void *arg)
{
	struct pci_vpd_pci22 *vpd =
		container_of(dev->vpd, struct pci_vpd_pci22, base);
	const u8 *buf = arg;
	loff_t end = pos + count;
	int ret = 0;

	if (pos < 0 || (pos & 3) || (count & 3))
		return -EINVAL;

	if (!vpd->valid) {
		vpd->valid = 1;
		vpd->base.len = pci_vpd_pci22_size(dev, vpd->base.len);
	}

	if (vpd->base.len == 0)
		return -EIO;

	if (end > vpd->base.len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	ret = pci_vpd_pci22_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;

		val = *buf++;
		val |= *buf++ << 8;
		val |= *buf++ << 16;
		val |= *buf++ << 24;

		ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
		if (ret < 0)
			break;
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos | PCI_VPD_ADDR_F);
		if (ret < 0)
			break;

500
		vpd->busy = 1;
501 502
		vpd->flag = 0;
		ret = pci_vpd_pci22_wait(dev);
503 504
		if (ret < 0)
			break;
505 506 507

		pos += sizeof(u32);
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}

static void pci_vpd_pci22_release(struct pci_dev *dev)
{
	kfree(container_of(dev->vpd, struct pci_vpd_pci22, base));
}

static const struct pci_vpd_ops pci_vpd_pci22_ops = {
	.read = pci_vpd_pci22_read,
	.write = pci_vpd_pci22_write,
	.release = pci_vpd_pci22_release,
};

static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
			       void *arg)
{
	struct pci_dev *tdev = pci_get_slot(dev->bus,
					    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
	ssize_t ret;

	if (!tdev)
		return -ENODEV;

	ret = pci_read_vpd(tdev, pos, count, arg);
	pci_dev_put(tdev);
	return ret;
}

static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
				const void *arg)
{
	struct pci_dev *tdev = pci_get_slot(dev->bus,
					    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
	ssize_t ret;

	if (!tdev)
		return -ENODEV;

	ret = pci_write_vpd(tdev, pos, count, arg);
	pci_dev_put(tdev);
	return ret;
}

static const struct pci_vpd_ops pci_vpd_f0_ops = {
	.read = pci_vpd_f0_read,
	.write = pci_vpd_f0_write,
	.release = pci_vpd_pci22_release,
};

int pci_vpd_pci22_init(struct pci_dev *dev)
{
	struct pci_vpd_pci22 *vpd;
	u8 cap;

	cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
	if (!cap)
		return -ENODEV;

	vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
	if (!vpd)
		return -ENOMEM;

	vpd->base.len = PCI_VPD_PCI22_SIZE;
	if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
		vpd->base.ops = &pci_vpd_f0_ops;
	else
		vpd->base.ops = &pci_vpd_pci22_ops;
	mutex_init(&vpd->lock);
	vpd->cap = cap;
	vpd->busy = 0;
	vpd->valid = 0;
	dev->vpd = &vpd->base;
	return 0;
}

/**
 * pci_cfg_access_lock - Lock PCI config reads/writes
 * @dev:	pci device struct
 *
 * When access is locked, any userspace reads or writes to config
 * space and concurrent lock requests will sleep until access is
 * allowed via pci_cfg_access_unlock() again.
 */
void pci_cfg_access_lock(struct pci_dev *dev)
{
	might_sleep();

	raw_spin_lock_irq(&pci_lock);
	if (dev->block_cfg_access)
		pci_wait_cfg(dev);
	dev->block_cfg_access = 1;
	raw_spin_unlock_irq(&pci_lock);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_lock);

/**
 * pci_cfg_access_trylock - try to lock PCI config reads/writes
 * @dev:	pci device struct
 *
 * Same as pci_cfg_access_lock, but will return 0 if access is
 * already locked, 1 otherwise. This function can be used from
 * atomic contexts.
 */
bool pci_cfg_access_trylock(struct pci_dev *dev)
{
	unsigned long flags;
	bool locked = true;

	raw_spin_lock_irqsave(&pci_lock, flags);
	if (dev->block_cfg_access)
		locked = false;
	else
		dev->block_cfg_access = 1;
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	return locked;
}
EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);

/**
 * pci_cfg_access_unlock - Unlock PCI config reads/writes
 * @dev:	pci device struct
 *
 * This function allows PCI config accesses to resume.
 */
void pci_cfg_access_unlock(struct pci_dev *dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);

	/* This indicates a problem in the caller, but we don't need
	 * to kill them, unlike a double-block above. */
	WARN_ON(!dev->block_cfg_access);

	dev->block_cfg_access = 0;
	wake_up_all(&pci_cfg_wait);
	raw_spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
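
/*
 * Usage sketch (illustrative): callers bracket an operation that must not
 * race with config space accesses, for example a device reset:
 *
 *	pci_cfg_access_lock(dev);
 *	... issue the reset and wait for completion ...
 *	pci_cfg_access_unlock(dev);
 *
 * pci_cfg_access_trylock() provides the same protection from atomic
 * context, letting the caller skip the operation if access is already
 * locked.
 */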

static inline int pcie_cap_version(const struct pci_dev *dev)
{
	return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
}

static bool pcie_downstream_port(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_DOWNSTREAM;
}

bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ENDPOINT ||
	       type == PCI_EXP_TYPE_LEG_END ||
	       type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_UPSTREAM ||
	       type == PCI_EXP_TYPE_DOWNSTREAM ||
	       type == PCI_EXP_TYPE_PCI_BRIDGE ||
	       type == PCI_EXP_TYPE_PCIE_BRIDGE;
}

static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
	return pcie_downstream_port(dev) &&
	       pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
}

static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_RC_EC;
}

static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
	if (!pci_is_pcie(dev))
		return false;

	switch (pos) {
	case PCI_EXP_FLAGS:
		return true;
	case PCI_EXP_DEVCAP:
	case PCI_EXP_DEVCTL:
	case PCI_EXP_DEVSTA:
		return true;
	case PCI_EXP_LNKCAP:
	case PCI_EXP_LNKCTL:
	case PCI_EXP_LNKSTA:
		return pcie_cap_has_lnkctl(dev);
	case PCI_EXP_SLTCAP:
	case PCI_EXP_SLTCTL:
	case PCI_EXP_SLTSTA:
		return pcie_cap_has_sltctl(dev);
	case PCI_EXP_RTCTL:
	case PCI_EXP_RTCAP:
	case PCI_EXP_RTSTA:
		return pcie_cap_has_rtctl(dev);
	case PCI_EXP_DEVCAP2:
	case PCI_EXP_DEVCTL2:
	case PCI_EXP_LNKCAP2:
	case PCI_EXP_LNKCTL2:
	case PCI_EXP_LNKSTA2:
		return pcie_cap_version(dev) > 1;
	default:
		return false;
	}
}

/*
 * Note that these accessor functions are only for the "PCI Express
 * Capability" (see PCIe spec r3.0, sec 7.8).  They do not apply to the
 * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
 */
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
	int ret;

	*val = 0;
	if (pos & 1)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_word() fails, it may
		 * have been written as 0xFFFF if hardware error happens
		 * during pci_read_config_word().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	/*
	 * For Functions that do not implement the Slot Capabilities,
	 * Slot Status, and Slot Control registers, these spaces must
	 * be hardwired to 0b, with the exception of the Presence Detect
	 * State bit in the Slot Status register of Downstream Ports,
	 * which must be hardwired to 1b.  (PCIe Base Spec 3.0, sec 7.8)
	 */
	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);
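
/*
 * Usage sketch (illustrative): querying the current link speed with the
 * standard constants from uapi/linux/pci_regs.h:
 *
 *	u16 lnksta;
 *
 *	if (!pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta))
 *		pr_info("link speed field: %u\n",
 *			lnksta & PCI_EXP_LNKSTA_CLS);
 */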

int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
	int ret;

	*val = 0;
	if (pos & 3)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_dword() fails, it may
		 * have been written as 0xFFFFFFFF if hardware error happens
		 * during pci_read_config_dword().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_dword);

int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
	if (pos & 1)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return 0;

	return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_word);

int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
{
	if (pos & 3)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return 0;

	return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_dword);

int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
				       u16 clear, u16 set)
{
	int ret;
	u16 val;

	ret = pcie_capability_read_word(dev, pos, &val);
	if (!ret) {
		val &= ~clear;
		val |= set;
		ret = pcie_capability_write_word(dev, pos, val);
	}

	return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_word);
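
/*
 * Usage sketch (illustrative): the read-modify-write helper above is the
 * usual way to flip individual control bits, e.g. clearing the ASPM
 * control field:
 *
 *	pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
 *					   PCI_EXP_LNKCTL_ASPMC, 0);
 *
 * The sequence is not atomic; callers that can race on the same register
 * must provide their own locking.
 */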

int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
					u32 clear, u32 set)
{
	int ret;
	u32 val;

	ret = pcie_capability_read_dword(dev, pos, &val);
	if (!ret) {
		val &= ~clear;
		val |= set;
		ret = pcie_capability_write_dword(dev, pos, val);
	}

	return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);