#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/wait.h>

#include "pci.h"

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */

DEFINE_RAW_SPINLOCK(pci_lock);

/*
 *  Wrappers for all PCI configuration access functions.  They just check
 *  alignment, do locking and call the low-level functions pointed to
 *  by pci_bus->ops.
 */

#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)

#define PCI_OP_READ(size,type,len) \
int pci_bus_read_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type *value)	\
{									\
	int res;							\
	unsigned long flags;						\
	u32 data = 0;							\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	raw_spin_lock_irqsave(&pci_lock, flags);			\
	res = bus->ops->read(bus, devfn, pos, len, &data);		\
	*value = (type)data;						\
	raw_spin_unlock_irqrestore(&pci_lock, flags);			\
	return res;							\
}

#define PCI_OP_WRITE(size,type,len) \
int pci_bus_write_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type value)	\
{									\
	int res;							\
	unsigned long flags;						\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	raw_spin_lock_irqsave(&pci_lock, flags);			\
	res = bus->ops->write(bus, devfn, pos, len, value);		\
	raw_spin_unlock_irqrestore(&pci_lock, flags);			\
	return res;							\
}
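
/*
 * Each PCI_OP_READ()/PCI_OP_WRITE() invocation below expands to one
 * exported accessor.  For example, PCI_OP_READ(byte, u8, 1) yields:
 *
 *	int pci_bus_read_config_byte(struct pci_bus *bus,
 *				     unsigned int devfn, int pos, u8 *value);
 *
 * i.e. a locked, alignment-checked 1-byte config-space read.
 */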

PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
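
/*
 * Most drivers use the struct pci_dev wrappers (pci_read_config_word()
 * and friends), which resolve to these bus-level accessors.  A sketch:
 *
 *	u16 vendor;
 *	pci_bus_read_config_word(pdev->bus, pdev->devfn,
 *				 PCI_VENDOR_ID, &vendor);
 */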

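/*
 * Generic accessors for host controllers that memory-map configuration
 * space: bus->ops->map_bus() translates (devfn, where) into a CPU
 * virtual address, and the access itself is a plain MMIO read/write.
 */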
int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 *val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where);
	if (!addr) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (size == 1)
		*val = readb(addr);
	else if (size == 2)
		*val = readw(addr);
	else
		*val = readl(addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read);

int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 1)
		writeb(val, addr);
	else if (size == 2)
		writew(val, addr);
	else
		writel(val, addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write);

int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
			      int where, int size, u32 *val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
	if (!addr) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	*val = readl(addr);

	/* Shift and mask to extract just the bytes that were requested. */
	if (size <= 2)
		*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read32);

int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 val)
{
	void __iomem *addr;
	u32 mask, tmp;

	addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 4) {
		writel(val, addr);
		return PCIBIOS_SUCCESSFUL;
	} else {
		mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
	}

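	/*
	 * Note: a sub-dword write is emulated here with a non-atomic
	 * read-modify-write of the containing dword; if the bytes that
	 * are not being written contain write-1-to-clear status bits,
	 * the merge can clear them inadvertently.
	 */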
	tmp = readl(addr) & mask;
	tmp |= val << ((where & 0x3) * 8);
	writel(tmp, addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write32);

/**
 * pci_bus_set_ops - Set raw operations of pci bus
 * @bus:	pci bus struct
 * @ops:	new raw operations
 *
 * Return previous raw operations
 */
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
{
	struct pci_ops *old_ops;
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);
	old_ops = bus->ops;
	bus->ops = ops;
	raw_spin_unlock_irqrestore(&pci_lock, flags);
	return old_ops;
}
EXPORT_SYMBOL(pci_bus_set_ops);
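
/*
 * A sketch of the intended use: interpose on a bus's config accesses
 * (e.g. for error injection), then restore the original ops:
 *
 *	struct pci_ops *orig = pci_bus_set_ops(bus, &my_ops);
 *	...
 *	pci_bus_set_ops(bus, orig);
 */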

/**
 * pci_read_vpd - Read one entry from Vital Product Data
 * @dev:	pci device struct
 * @pos:	offset in vpd space
 * @count:	number of bytes to read
 * @buf:	pointer to where to store result
 *
 */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -ENODEV;
	return dev->vpd->ops->read(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_read_vpd);

/**
 * pci_write_vpd - Write entry to Vital Product Data
 * @dev:	pci device struct
 * @pos:	offset in vpd space
 * @count:	number of bytes to write
 * @buf:	buffer containing write data
 *
 */
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -ENODEV;
	return dev->vpd->ops->write(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_write_vpd);
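
/*
 * Example (a sketch): read the first bytes of VPD, which normally start
 * with a small-resource identifier-string tag:
 *
 *	u8 hdr[8];
 *	ssize_t n = pci_read_vpd(dev, 0, sizeof(hdr), hdr);
 *	if (n < 0)
 *		return n;	// no VPD capability, or the read failed
 */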

/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so.  Some devices require this during BIST and
 * we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);

/*
 * Called with pci_lock held (IRQs disabled); drops and reacquires it
 * around schedule() until dev->block_cfg_access is cleared.
 */
static noinline void pci_wait_cfg(struct pci_dev *dev)
{
	DECLARE_WAITQUEUE(wait, current);

	__add_wait_queue(&pci_cfg_wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		raw_spin_unlock_irq(&pci_lock);
		schedule();
		raw_spin_lock_irq(&pci_lock);
	} while (dev->block_cfg_access);
	__remove_wait_queue(&pci_cfg_wait, &wait);
}

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_READ_CONFIG(size,type)					\
int pci_user_read_config_##size						\
	(struct pci_dev *dev, int pos, type *val)			\
{									\
	int ret = PCIBIOS_SUCCESSFUL;					\
	u32 data = -1;							\
	if (PCI_##size##_BAD)						\
		return -EINVAL;						\
	raw_spin_lock_irq(&pci_lock);					\
	if (unlikely(dev->block_cfg_access))				\
		pci_wait_cfg(dev);					\
	ret = dev->bus->ops->read(dev->bus, dev->devfn,			\
					pos, sizeof(type), &data);	\
	raw_spin_unlock_irq(&pci_lock);					\
	*val = (type)data;						\
	return pcibios_err_to_errno(ret);				\
}									\
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_WRITE_CONFIG(size,type)				\
int pci_user_write_config_##size					\
	(struct pci_dev *dev, int pos, type val)			\
{									\
	int ret = PCIBIOS_SUCCESSFUL;					\
	if (PCI_##size##_BAD)						\
		return -EINVAL;						\
	raw_spin_lock_irq(&pci_lock);					\
	if (unlikely(dev->block_cfg_access))				\
		pci_wait_cfg(dev);					\
	ret = dev->bus->ops->write(dev->bus, dev->devfn,		\
					pos, sizeof(type), val);	\
	raw_spin_unlock_irq(&pci_lock);					\
	return pcibios_err_to_errno(ret);				\
}									\
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);
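
/*
 * Unlike the pci_bus_* accessors above, these honor the config-access
 * block (sleeping in pci_wait_cfg() while it is held) and return
 * negative errno values rather than raw PCIBIOS_* codes.
 */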

PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)

/* VPD access through PCI 2.2+ VPD capability */

#define PCI_VPD_PCI22_SIZE (PCI_VPD_ADDR_MASK + 1)

struct pci_vpd_pci22 {
	struct pci_vpd base;
	struct mutex lock;	/* serializes VPD accesses */
	u16	flag;		/* PCI_VPD_ADDR_F value that ends a wait */
	bool	busy;		/* VPD operation in flight */
	u8	cap;		/* config-space offset of the VPD capability */
};

/*
 * Wait for last operation to complete.
 * This code has to spin since there is no other notification from the PCI
 * hardware. Since the VPD is often implemented by serial attachment to an
 * EEPROM, it may take many milliseconds to complete.
 *
 * Returns 0 on success, negative values indicate error.
 */
static int pci_vpd_pci22_wait(struct pci_dev *dev)
{
	struct pci_vpd_pci22 *vpd =
		container_of(dev->vpd, struct pci_vpd_pci22, base);
	unsigned long timeout = jiffies + HZ/20 + 2;	/* 50 ms + 2 jiffies slack */
	u16 status;
	int ret;

	if (!vpd->busy)
		return 0;

	for (;;) {
		ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						&status);
		if (ret < 0)
			return ret;

		if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
			vpd->busy = false;
			return 0;
		}

		if (time_after(jiffies, timeout)) {
			dev_printk(KERN_DEBUG, &dev->dev, "vpd r/w failed.  This is likely a firmware bug on this device.  Contact the card vendor for a firmware update\n");
			return -ETIMEDOUT;
		}
		if (fatal_signal_pending(current))
			return -EINTR;
		if (!cond_resched())
			udelay(10);
	}
}

static ssize_t pci_vpd_pci22_read(struct pci_dev *dev, loff_t pos, size_t count,
				  void *arg)
{
	struct pci_vpd_pci22 *vpd =
		container_of(dev->vpd, struct pci_vpd_pci22, base);
	int ret;
	loff_t end = pos + count;
	u8 *buf = arg;

	if (pos < 0 || pos > vpd->base.len || end > vpd->base.len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	ret = pci_vpd_pci22_wait(dev);
	if (ret < 0)
		goto out;

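	/*
	 * Read a dword at a time: write the aligned address, wait for
	 * PCI_VPD_ADDR_F to flip, read PCI_VPD_DATA, then copy out only
	 * the bytes that were actually requested.
	 */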
	while (pos < end) {
		u32 val;
		unsigned int i, skip;

		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos & ~3);
		if (ret < 0)
			break;
		vpd->busy = true;
		vpd->flag = PCI_VPD_ADDR_F;
		ret = pci_vpd_pci22_wait(dev);
		if (ret < 0)
			break;

		ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
		if (ret < 0)
			break;

		skip = pos & 3;
		for (i = 0;  i < sizeof(u32); i++) {
			if (i >= skip) {
				*buf++ = val;
				if (++pos == end)
					break;
			}
			val >>= 8;
		}
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}

static ssize_t pci_vpd_pci22_write(struct pci_dev *dev, loff_t pos, size_t count,
				   const void *arg)
{
	struct pci_vpd_pci22 *vpd =
		container_of(dev->vpd, struct pci_vpd_pci22, base);
	const u8 *buf = arg;
	loff_t end = pos + count;
	int ret = 0;

	if (pos < 0 || (pos & 3) || (count & 3) || end > vpd->base.len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	ret = pci_vpd_pci22_wait(dev);
	if (ret < 0)
		goto out;

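	/*
	 * Writes must be dword-aligned and a multiple of four bytes
	 * (checked above): write each little-endian dword to
	 * PCI_VPD_DATA, then the address with PCI_VPD_ADDR_F set; the
	 * device clears F when the write has completed.
	 */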
	while (pos < end) {
		u32 val;

		val = *buf++;
		val |= *buf++ << 8;
		val |= *buf++ << 16;
		val |= *buf++ << 24;

		ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
		if (ret < 0)
			break;
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos | PCI_VPD_ADDR_F);
		if (ret < 0)
			break;

		vpd->busy = true;
		vpd->flag = 0;
		ret = pci_vpd_pci22_wait(dev);
		if (ret < 0)
			break;

		pos += sizeof(u32);
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}

static void pci_vpd_pci22_release(struct pci_dev *dev)
{
	kfree(container_of(dev->vpd, struct pci_vpd_pci22, base));
}

static const struct pci_vpd_ops pci_vpd_pci22_ops = {
	.read = pci_vpd_pci22_read,
	.write = pci_vpd_pci22_write,
	.release = pci_vpd_pci22_release,
};

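/*
 * Some multi-function devices implement VPD only on function 0.  When
 * PCI_DEV_FLAGS_VPD_REF_F0 is set, VPD accesses are redirected to the
 * function 0 device in the same slot.
 */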
static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
			       void *arg)
{
	struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
	ssize_t ret;

	if (!tdev)
		return -ENODEV;

	ret = pci_read_vpd(tdev, pos, count, arg);
	pci_dev_put(tdev);
	return ret;
}

static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
				const void *arg)
{
	struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
	ssize_t ret;

	if (!tdev)
		return -ENODEV;

	ret = pci_write_vpd(tdev, pos, count, arg);
	pci_dev_put(tdev);
	return ret;
}

static const struct pci_vpd_ops pci_vpd_f0_ops = {
	.read = pci_vpd_f0_read,
	.write = pci_vpd_f0_write,
	.release = pci_vpd_pci22_release,
};

static int pci_vpd_f0_dev_check(struct pci_dev *dev)
{
	struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
	int ret = 0;

	if (!tdev)
		return -ENODEV;
	if (!tdev->vpd || !tdev->multifunction ||
	    dev->class != tdev->class || dev->vendor != tdev->vendor ||
	    dev->device != tdev->device)
		ret = -ENODEV;

	pci_dev_put(tdev);
	return ret;
}

int pci_vpd_pci22_init(struct pci_dev *dev)
{
	struct pci_vpd_pci22 *vpd;
	u8 cap;

	cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
	if (!cap)
		return -ENODEV;
	if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
		int ret = pci_vpd_f0_dev_check(dev);

		if (ret)
			return ret;
	}
	vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
	if (!vpd)
		return -ENOMEM;

	vpd->base.len = PCI_VPD_PCI22_SIZE;
	if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
		vpd->base.ops = &pci_vpd_f0_ops;
	else
		vpd->base.ops = &pci_vpd_pci22_ops;
	mutex_init(&vpd->lock);
	vpd->cap = cap;
	vpd->busy = false;
	dev->vpd = &vpd->base;
	return 0;
}

/**
 * pci_cfg_access_lock - Lock PCI config reads/writes
 * @dev:	pci device struct
 *
 * When access is locked, any userspace reads or writes to config
 * space and concurrent lock requests will sleep until access is
 * allowed via pci_cfg_access_unlock() again.
 */
void pci_cfg_access_lock(struct pci_dev *dev)
{
	might_sleep();

	raw_spin_lock_irq(&pci_lock);
	if (dev->block_cfg_access)
		pci_wait_cfg(dev);
	dev->block_cfg_access = 1;
	raw_spin_unlock_irq(&pci_lock);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_lock);
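
/*
 * A sketch of the typical pairing, blocking user config access across
 * an operation such as a device reset:
 *
 *	pci_cfg_access_lock(dev);
 *	... perform the reset, poll for completion ...
 *	pci_cfg_access_unlock(dev);
 */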

/**
 * pci_cfg_access_trylock - try to lock PCI config reads/writes
 * @dev:	pci device struct
 *
 * Same as pci_cfg_access_lock, but will return 0 if access is
 * already locked, 1 otherwise. This function can be used from
 * atomic contexts.
 */
bool pci_cfg_access_trylock(struct pci_dev *dev)
{
	unsigned long flags;
	bool locked = true;

	raw_spin_lock_irqsave(&pci_lock, flags);
	if (dev->block_cfg_access)
		locked = false;
	else
		dev->block_cfg_access = 1;
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	return locked;
}
EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);

/**
 * pci_cfg_access_unlock - Unlock PCI config reads/writes
 * @dev:	pci device struct
 *
 * This function allows PCI config accesses to resume.
 */
void pci_cfg_access_unlock(struct pci_dev *dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);

	/*
	 * This indicates a problem in the caller, but we don't need
	 * to kill them, unlike a double-block above.
	 */
	WARN_ON(!dev->block_cfg_access);

	dev->block_cfg_access = 0;
	wake_up_all(&pci_cfg_wait);
	raw_spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);

static inline int pcie_cap_version(const struct pci_dev *dev)
{
	return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
}

static bool pcie_downstream_port(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_DOWNSTREAM;
}

bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ENDPOINT ||
	       type == PCI_EXP_TYPE_LEG_END ||
	       type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_UPSTREAM ||
	       type == PCI_EXP_TYPE_DOWNSTREAM ||
	       type == PCI_EXP_TYPE_PCI_BRIDGE ||
	       type == PCI_EXP_TYPE_PCIE_BRIDGE;
}

static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
	return pcie_downstream_port(dev) &&
	       pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
}

static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_RC_EC;
}

static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
	if (!pci_is_pcie(dev))
		return false;

	switch (pos) {
	case PCI_EXP_FLAGS:
	case PCI_EXP_DEVCAP:
	case PCI_EXP_DEVCTL:
	case PCI_EXP_DEVSTA:
		return true;
	case PCI_EXP_LNKCAP:
	case PCI_EXP_LNKCTL:
	case PCI_EXP_LNKSTA:
		return pcie_cap_has_lnkctl(dev);
	case PCI_EXP_SLTCAP:
	case PCI_EXP_SLTCTL:
	case PCI_EXP_SLTSTA:
		return pcie_cap_has_sltctl(dev);
	case PCI_EXP_RTCTL:
	case PCI_EXP_RTCAP:
	case PCI_EXP_RTSTA:
		return pcie_cap_has_rtctl(dev);
	case PCI_EXP_DEVCAP2:
	case PCI_EXP_DEVCTL2:
	case PCI_EXP_LNKCAP2:
	case PCI_EXP_LNKCTL2:
	case PCI_EXP_LNKSTA2:
		return pcie_cap_version(dev) > 1;
	default:
		return false;
	}
}

/*
 * Note that these accessor functions are only for the "PCI Express
 * Capability" (see PCIe spec r3.0, sec 7.8).  They do not apply to the
 * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
 */
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
	int ret;

	*val = 0;
	if (pos & 1)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_word() fails, it may
		 * have been written as 0xFFFF if hardware error happens
		 * during pci_read_config_word().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	/*
	 * For Functions that do not implement the Slot Capabilities,
	 * Slot Status, and Slot Control registers, these spaces must
	 * be hardwired to 0b, with the exception of the Presence Detect
	 * State bit in the Slot Status register of Downstream Ports,
	 * which must be hardwired to 1b.  (PCIe Base Spec 3.0, sec 7.8)
	 */
	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);
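
/*
 * Example (a sketch): query the negotiated link speed of a device:
 *
 *	u16 lnksta;
 *	if (!pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta))
 *		speed = lnksta & PCI_EXP_LNKSTA_CLS;
 */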

int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
	int ret;

	*val = 0;
	if (pos & 3)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_dword() fails, it may
		 * have been written as 0xFFFFFFFF if hardware error happens
		 * during pci_read_config_dword().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_dword);

int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
	if (pos & 1)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return 0;

	return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_word);

int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
{
	if (pos & 3)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return 0;

	return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_dword);

int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
				       u16 clear, u16 set)
{
	int ret;
	u16 val;

	ret = pcie_capability_read_word(dev, pos, &val);
	if (!ret) {
		val &= ~clear;
		val |= set;
		ret = pcie_capability_write_word(dev, pos, val);
	}

	return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_word);
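
/*
 * Example (a sketch): a read-modify-write that enables Common Clock
 * Configuration without disturbing the other Link Control bits:
 *
 *	pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
 *					   0, PCI_EXP_LNKCTL_CCC);
 */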

int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
					u32 clear, u32 set)
{
	int ret;
	u32 val;

	ret = pcie_capability_read_dword(dev, pos, &val);
	if (!ret) {
		val &= ~clear;
		val |= set;
		ret = pcie_capability_write_dword(dev, pos, val);
	}

	return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);