#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/wait.h>

#include "pci.h"

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */

DEFINE_RAW_SPINLOCK(pci_lock);

/*
 *  Wrappers for all PCI configuration access functions.  They just check
 *  alignment, do locking and call the low-level functions pointed to
 *  by pci_dev->ops.
 */

#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)

#define PCI_OP_READ(size, type, len) \
int pci_bus_read_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type *value)	\
{									\
	int res;							\
	unsigned long flags;						\
	u32 data = 0;							\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	raw_spin_lock_irqsave(&pci_lock, flags);			\
	res = bus->ops->read(bus, devfn, pos, len, &data);		\
	*value = (type)data;						\
	raw_spin_unlock_irqrestore(&pci_lock, flags);		\
	return res;							\
}

#define PCI_OP_WRITE(size, type, len) \
int pci_bus_write_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type value)	\
{									\
	int res;							\
	unsigned long flags;						\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	raw_spin_lock_irqsave(&pci_lock, flags);			\
	res = bus->ops->write(bus, devfn, pos, len, value);		\
	raw_spin_unlock_irqrestore(&pci_lock, flags);		\
	return res;							\
}

PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
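
/*
 * Usage sketch (illustrative only, not part of this file's API): a caller
 * that already holds a struct pci_bus pointer can read the Vendor ID of
 * slot 3, function 0 with the word-sized wrapper, checking the PCIBIOS_*
 * return code rather than the value itself:
 *
 *	u16 vendor;
 *
 *	if (pci_bus_read_config_word(bus, PCI_DEVFN(3, 0), PCI_VENDOR_ID,
 *				     &vendor) != PCIBIOS_SUCCESSFUL)
 *		return -ENODEV;
 */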

int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 *val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where);
	if (!addr) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (size == 1)
		*val = readb(addr);
	else if (size == 2)
		*val = readw(addr);
	else
		*val = readl(addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read);

int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 1)
		writeb(val, addr);
	else if (size == 2)
		writew(val, addr);
	else
		writel(val, addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write);

int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
			      int where, int size, u32 *val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
	if (!addr) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	*val = readl(addr);

	if (size <= 2)
		*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read32);

int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 val)
{
	void __iomem *addr;
	u32 mask, tmp;

	addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 4) {
		writel(val, addr);
		return PCIBIOS_SUCCESSFUL;
	} else {
		mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
	}

	tmp = readl(addr) & mask;
	tmp |= val << ((where & 0x3) * 8);
	writel(tmp, addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write32);
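
/*
 * Sketch (not taken from this file) of how a host controller driver with
 * a flat, memory-mapped configuration space can reuse the generic
 * accessors above: it supplies only a map_bus() callback and points
 * read/write at pci_generic_config_read/write.  The "foo" names and the
 * ECAM-style offset math are hypothetical:
 *
 *	static void __iomem *foo_map_bus(struct pci_bus *bus,
 *					 unsigned int devfn, int where)
 *	{
 *		struct foo_pcie *foo = bus->sysdata;
 *
 *		return foo->cfg_base + (bus->number << 20) +
 *		       (devfn << 12) + where;
 *	}
 *
 *	static struct pci_ops foo_pcie_ops = {
 *		.map_bus	= foo_map_bus,
 *		.read		= pci_generic_config_read,
 *		.write		= pci_generic_config_write,
 *	};
 */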

/**
 * pci_bus_set_ops - Set raw operations of pci bus
 * @bus:	pci bus struct
 * @ops:	new raw operations
 *
 * Return previous raw operations
 */
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
{
	struct pci_ops *old_ops;
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);
	old_ops = bus->ops;
	bus->ops = ops;
	raw_spin_unlock_irqrestore(&pci_lock, flags);
	return old_ops;
}
EXPORT_SYMBOL(pci_bus_set_ops);

/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so.  Some devices require this during BIST and
 * we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);

static noinline void pci_wait_cfg(struct pci_dev *dev)
{
	DECLARE_WAITQUEUE(wait, current);

	__add_wait_queue(&pci_cfg_wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		raw_spin_unlock_irq(&pci_lock);
		schedule();
		raw_spin_lock_irq(&pci_lock);
	} while (dev->block_cfg_access);
	__remove_wait_queue(&pci_cfg_wait, &wait);
}

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_READ_CONFIG(size, type)					\
int pci_user_read_config_##size						\
	(struct pci_dev *dev, int pos, type *val)			\
{									\
	int ret = PCIBIOS_SUCCESSFUL;					\
	u32 data = -1;							\
	if (PCI_##size##_BAD)						\
		return -EINVAL;						\
	raw_spin_lock_irq(&pci_lock);				\
	if (unlikely(dev->block_cfg_access))				\
		pci_wait_cfg(dev);					\
	ret = dev->bus->ops->read(dev->bus, dev->devfn,			\
					pos, sizeof(type), &data);	\
	raw_spin_unlock_irq(&pci_lock);				\
	*val = (type)data;						\
	return pcibios_err_to_errno(ret);				\
}									\
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_WRITE_CONFIG(size, type)				\
int pci_user_write_config_##size					\
	(struct pci_dev *dev, int pos, type val)			\
{									\
	int ret = PCIBIOS_SUCCESSFUL;					\
	if (PCI_##size##_BAD)						\
		return -EINVAL;						\
	raw_spin_lock_irq(&pci_lock);				\
	if (unlikely(dev->block_cfg_access))				\
		pci_wait_cfg(dev);					\
	ret = dev->bus->ops->write(dev->bus, dev->devfn,		\
					pos, sizeof(type), val);	\
	raw_spin_unlock_irq(&pci_lock);				\
	return pcibios_err_to_errno(ret);				\
}									\
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);

PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)

/* VPD access through PCI 2.2+ VPD capability */

/**
 * pci_read_vpd - Read one entry from Vital Product Data
 * @dev:	pci device struct
 * @pos:	offset in vpd space
 * @count:	number of bytes to read
 * @buf:	pointer to where to store result
 */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -ENODEV;
	return dev->vpd->ops->read(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_read_vpd);

/**
 * pci_write_vpd - Write entry to Vital Product Data
 * @dev:	pci device struct
 * @pos:	offset in vpd space
 * @count:	number of bytes to write
 * @buf:	buffer containing write data
 */
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -ENODEV;
	return dev->vpd->ops->write(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_write_vpd);
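
/*
 * Usage sketch (illustrative only): a driver reading the first 128 bytes
 * of its device's VPD into a local buffer; "pdev" stands for whatever
 * struct pci_dev the caller already owns.  Short reads and negative
 * errnos both come back through the return value:
 *
 *	u8 vpd_buf[128];
 *	ssize_t len;
 *
 *	len = pci_read_vpd(pdev, 0, sizeof(vpd_buf), vpd_buf);
 *	if (len < 0)
 *		dev_warn(&pdev->dev, "VPD read failed\n");
 */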

/**
 * pci_set_vpd_size - Set size of Vital Product Data space
 * @dev:	pci device struct
 * @len:	size of vpd space
 */
int pci_set_vpd_size(struct pci_dev *dev, size_t len)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -ENODEV;
	return dev->vpd->ops->set_size(dev, len);
}
EXPORT_SYMBOL(pci_set_vpd_size);

#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)

/**
 * pci_vpd_size - determine actual size of Vital Product Data
 * @dev:	pci device struct
 * @old_size:	current assumed size, also maximum allowed size
 */
static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
{
	size_t off = 0;
	unsigned char header[1+2];	/* 1 byte tag, 2 bytes length */

	while (off < old_size &&
	       pci_read_vpd(dev, off, 1, header) == 1) {
		unsigned char tag;

		if (header[0] & PCI_VPD_LRDT) {
			/* Large Resource Data Type Tag */
			tag = pci_vpd_lrdt_tag(header);
			/* Only read length from known tag items */
			if ((tag == PCI_VPD_LTIN_ID_STRING) ||
			    (tag == PCI_VPD_LTIN_RO_DATA) ||
			    (tag == PCI_VPD_LTIN_RW_DATA)) {
				if (pci_read_vpd(dev, off+1, 2,
						 &header[1]) != 2) {
					dev_warn(&dev->dev,
						 "invalid large VPD tag %02x size at offset %zu",
						 tag, off + 1);
					return 0;
				}
				off += PCI_VPD_LRDT_TAG_SIZE +
					pci_vpd_lrdt_size(header);
			}
		} else {
			/* Short Resource Data Type Tag */
			off += PCI_VPD_SRDT_TAG_SIZE +
				pci_vpd_srdt_size(header);
			tag = pci_vpd_srdt_tag(header);
		}

		if (tag == PCI_VPD_STIN_END)	/* End tag descriptor */
			return off;

		if ((tag != PCI_VPD_LTIN_ID_STRING) &&
		    (tag != PCI_VPD_LTIN_RO_DATA) &&
		    (tag != PCI_VPD_LTIN_RW_DATA)) {
			dev_warn(&dev->dev,
				 "invalid %s VPD tag %02x at offset %zu",
				 (header[0] & PCI_VPD_LRDT) ? "large" : "short",
				 tag, off);
			return 0;
		}
	}
	return 0;
}

/*
 * Wait for last operation to complete.
 * This code has to spin since there is no other notification from the PCI
 * hardware. Since the VPD is often implemented by serial attachment to an
 * EEPROM, it may take many milliseconds to complete.
 *
 * Returns 0 on success, negative values indicate error.
 */
static int pci_vpd_wait(struct pci_dev *dev)
{
	struct pci_vpd *vpd = dev->vpd;
	unsigned long timeout = jiffies + msecs_to_jiffies(50);
	unsigned long max_sleep = 16;
	u16 status;
	int ret;

	if (!vpd->busy)
		return 0;

	while (time_before(jiffies, timeout)) {
		ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						&status);
		if (ret < 0)
			return ret;

		if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
			vpd->busy = 0;
			return 0;
		}

		if (fatal_signal_pending(current))
			return -EINTR;

		usleep_range(10, max_sleep);
		if (max_sleep < 1024)
			max_sleep *= 2;
	}

	dev_warn(&dev->dev, "VPD access failed.  This is likely a firmware bug on this device.  Contact the card vendor for a firmware update\n");
	return -ETIMEDOUT;
}

static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
			    void *arg)
{
	struct pci_vpd *vpd = dev->vpd;
	int ret;
	loff_t end = pos + count;
	u8 *buf = arg;

	if (pos < 0)
		return -EINVAL;

	if (!vpd->valid) {
		vpd->valid = 1;
		vpd->len = pci_vpd_size(dev, vpd->len);
	}

	if (vpd->len == 0)
		return -EIO;

	if (pos > vpd->len)
		return 0;

	if (end > vpd->len) {
		end = vpd->len;
		count = end - pos;
	}

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	ret = pci_vpd_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;
		unsigned int i, skip;

		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos & ~3);
		if (ret < 0)
			break;
		vpd->busy = 1;
		vpd->flag = PCI_VPD_ADDR_F;
		ret = pci_vpd_wait(dev);
		if (ret < 0)
			break;

		ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
		if (ret < 0)
			break;

		skip = pos & 3;
		for (i = 0;  i < sizeof(u32); i++) {
			if (i >= skip) {
				*buf++ = val;
				if (++pos == end)
					break;
			}
			val >>= 8;
		}
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}

static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
			     const void *arg)
{
	struct pci_vpd *vpd = dev->vpd;
	const u8 *buf = arg;
	loff_t end = pos + count;
	int ret = 0;

	if (pos < 0 || (pos & 3) || (count & 3))
		return -EINVAL;

	if (!vpd->valid) {
		vpd->valid = 1;
		vpd->len = pci_vpd_size(dev, vpd->len);
	}

	if (vpd->len == 0)
		return -EIO;

	if (end > vpd->len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	ret = pci_vpd_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;

		val = *buf++;
		val |= *buf++ << 8;
		val |= *buf++ << 16;
		val |= *buf++ << 24;

		ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
		if (ret < 0)
			break;
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos | PCI_VPD_ADDR_F);
		if (ret < 0)
			break;

		vpd->busy = 1;
		vpd->flag = 0;
		ret = pci_vpd_wait(dev);
		if (ret < 0)
			break;

		pos += sizeof(u32);
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}

static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
{
	struct pci_vpd *vpd = dev->vpd;

	if (len == 0 || len > PCI_VPD_MAX_SIZE)
		return -EIO;

	vpd->valid = 1;
	vpd->len = len;

	return 0;
}

static const struct pci_vpd_ops pci_vpd_ops = {
	.read = pci_vpd_read,
	.write = pci_vpd_write,
	.set_size = pci_vpd_set_size,
};

static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
			       void *arg)
{
	struct pci_dev *tdev = pci_get_slot(dev->bus,
					    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
	ssize_t ret;

	if (!tdev)
		return -ENODEV;

	ret = pci_read_vpd(tdev, pos, count, arg);
	pci_dev_put(tdev);
	return ret;
}

static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
				const void *arg)
{
	struct pci_dev *tdev = pci_get_slot(dev->bus,
					    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
	ssize_t ret;

	if (!tdev)
		return -ENODEV;

	ret = pci_write_vpd(tdev, pos, count, arg);
	pci_dev_put(tdev);
	return ret;
}

static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
{
	struct pci_dev *tdev = pci_get_slot(dev->bus,
					    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
	int ret;

	if (!tdev)
		return -ENODEV;

	ret = pci_set_vpd_size(tdev, len);
	pci_dev_put(tdev);
	return ret;
}

static const struct pci_vpd_ops pci_vpd_f0_ops = {
	.read = pci_vpd_f0_read,
	.write = pci_vpd_f0_write,
	.set_size = pci_vpd_f0_set_size,
};

int pci_vpd_init(struct pci_dev *dev)
{
	struct pci_vpd *vpd;
	u8 cap;

	cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
	if (!cap)
		return -ENODEV;

	vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
	if (!vpd)
		return -ENOMEM;

	vpd->len = PCI_VPD_MAX_SIZE;
	if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
		vpd->ops = &pci_vpd_f0_ops;
	else
		vpd->ops = &pci_vpd_ops;
	mutex_init(&vpd->lock);
	vpd->cap = cap;
	vpd->busy = 0;
	vpd->valid = 0;
	dev->vpd = vpd;
	return 0;
}

void pci_vpd_release(struct pci_dev *dev)
{
	kfree(dev->vpd);
}

/**
 * pci_cfg_access_lock - Lock PCI config reads/writes
 * @dev:	pci device struct
 *
 * When access is locked, any userspace reads or writes to config
 * space and concurrent lock requests will sleep until access is
 * allowed via pci_cfg_access_unlock() again.
 */
void pci_cfg_access_lock(struct pci_dev *dev)
{
	might_sleep();

	raw_spin_lock_irq(&pci_lock);
	if (dev->block_cfg_access)
		pci_wait_cfg(dev);
	dev->block_cfg_access = 1;
	raw_spin_unlock_irq(&pci_lock);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_lock);

/**
 * pci_cfg_access_trylock - try to lock PCI config reads/writes
 * @dev:	pci device struct
 *
 * Same as pci_cfg_access_lock, but will return 0 if access is
 * already locked, 1 otherwise. This function can be used from
 * atomic contexts.
 */
bool pci_cfg_access_trylock(struct pci_dev *dev)
{
	unsigned long flags;
	bool locked = true;

	raw_spin_lock_irqsave(&pci_lock, flags);
	if (dev->block_cfg_access)
		locked = false;
	else
		dev->block_cfg_access = 1;
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	return locked;
}
EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);

/**
 * pci_cfg_access_unlock - Unlock PCI config reads/writes
 * @dev:	pci device struct
 *
 * This function allows PCI config accesses to resume.
 */
void pci_cfg_access_unlock(struct pci_dev *dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);

	/* This indicates a problem in the caller, but we don't need
	 * to kill them, unlike a double-block above. */
	WARN_ON(!dev->block_cfg_access);

	dev->block_cfg_access = 0;
	wake_up_all(&pci_cfg_wait);
	raw_spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
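
/*
 * Typical use of the lock/unlock pair above (hypothetical example;
 * foo_reset_device() is not a real helper): exclude user-space config
 * accesses while the device is in a state where they would be unsafe,
 * e.g. around a device-specific reset that may sleep:
 *
 *	pci_cfg_access_lock(pdev);
 *	foo_reset_device(pdev);
 *	pci_cfg_access_unlock(pdev);
 */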

static inline int pcie_cap_version(const struct pci_dev *dev)
{
	return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
}

static bool pcie_downstream_port(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_DOWNSTREAM;
}

bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ENDPOINT ||
	       type == PCI_EXP_TYPE_LEG_END ||
	       type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_UPSTREAM ||
	       type == PCI_EXP_TYPE_DOWNSTREAM ||
	       type == PCI_EXP_TYPE_PCI_BRIDGE ||
	       type == PCI_EXP_TYPE_PCIE_BRIDGE;
}

static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
	return pcie_downstream_port(dev) &&
	       pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
}

static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_RC_EC;
}

static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
	if (!pci_is_pcie(dev))
		return false;

	switch (pos) {
	case PCI_EXP_FLAGS:
		return true;
	case PCI_EXP_DEVCAP:
	case PCI_EXP_DEVCTL:
	case PCI_EXP_DEVSTA:
		return true;
	case PCI_EXP_LNKCAP:
	case PCI_EXP_LNKCTL:
	case PCI_EXP_LNKSTA:
		return pcie_cap_has_lnkctl(dev);
	case PCI_EXP_SLTCAP:
	case PCI_EXP_SLTCTL:
	case PCI_EXP_SLTSTA:
		return pcie_cap_has_sltctl(dev);
	case PCI_EXP_RTCTL:
	case PCI_EXP_RTCAP:
	case PCI_EXP_RTSTA:
		return pcie_cap_has_rtctl(dev);
	case PCI_EXP_DEVCAP2:
	case PCI_EXP_DEVCTL2:
	case PCI_EXP_LNKCAP2:
	case PCI_EXP_LNKCTL2:
	case PCI_EXP_LNKSTA2:
		return pcie_cap_version(dev) > 1;
	default:
		return false;
	}
}

/*
 * Note that these accessor functions are only for the "PCI Express
 * Capability" (see PCIe spec r3.0, sec 7.8).  They do not apply to the
 * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
 */
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
	int ret;

	*val = 0;
	if (pos & 1)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_word() fails; it may
		 * have been set to 0xFFFF if a hardware error occurred
		 * during the read.
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	/*
	 * For Functions that do not implement the Slot Capabilities,
	 * Slot Status, and Slot Control registers, these spaces must
	 * be hardwired to 0b, with the exception of the Presence Detect
	 * State bit in the Slot Status register of Downstream Ports,
	 * which must be hardwired to 1b.  (PCIe Base Spec 3.0, sec 7.8)
	 */
	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);
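
/*
 * Usage sketch (illustrative only): querying the negotiated link width
 * from the Link Status register of a PCIe device "pdev".  A zero return
 * means *val holds a usable value (possibly the hardwired default
 * described above):
 *
 *	u16 lnksta;
 *	unsigned int width;
 *
 *	if (!pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta))
 *		width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
 *			PCI_EXP_LNKSTA_NLW_SHIFT;
 */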

int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
	int ret;

	*val = 0;
	if (pos & 3)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_dword() fails; it may
		 * have been set to 0xFFFFFFFF if a hardware error occurred
		 * during the read.
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_dword);

int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
	if (pos & 1)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return 0;

	return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_word);

int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
{
	if (pos & 3)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return 0;

	return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_dword);

int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
				       u16 clear, u16 set)
{
	int ret;
	u16 val;

	ret = pcie_capability_read_word(dev, pos, &val);
	if (!ret) {
		val &= ~clear;
		val |= set;
		ret = pcie_capability_write_word(dev, pos, val);
	}

	return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_word);

int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
					u32 clear, u32 set)
{
	int ret;
	u32 val;

	ret = pcie_capability_read_dword(dev, pos, &val);
	if (!ret) {
		val &= ~clear;
		val |= set;
		ret = pcie_capability_write_dword(dev, pos, val);
	}

	return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);