eeh_pseries.c
/*
 * This file implements the platform-dependent EEH operations on pseries.
 * The pseries platform is built heavily on RTAS, which means the pseries
 * platform-dependent EEH operations are built on RTAS calls. The functions
 * are derived from arch/powerpc/platforms/pseries/eeh.c and the necessary
 * cleanup has been done.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2011.
 * Copyright IBM Corporation 2001, 2005, 2006
 * Copyright Dave Engebretsen & Todd Inglett 2001
 * Copyright Linas Vepstas 2005, 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>

/* RTAS tokens */
static int ibm_set_eeh_option;
static int ibm_set_slot_reset;
static int ibm_read_slot_reset_state;
static int ibm_read_slot_reset_state2;
static int ibm_slot_error_detail;
static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
static int ibm_configure_bridge;
static int ibm_configure_pe;

/*
 * Buffer for reporting slot-error-detail RTAS calls. It's here
 * in BSS, and not dynamically allocated, so that it ends up in
 * the RMO where RTAS can access it.
 */
static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(slot_errbuf_lock);
static int eeh_error_buf_size;

/**
 * pseries_eeh_init - EEH platform dependent initialization
 *
 * EEH platform dependent initialization on pseries.
 */
static int pseries_eeh_init(void)
{
	/* figure out EEH RTAS function call tokens */
	ibm_set_eeh_option		= rtas_token("ibm,set-eeh-option");
	ibm_set_slot_reset		= rtas_token("ibm,set-slot-reset");
	ibm_read_slot_reset_state2	= rtas_token("ibm,read-slot-reset-state2");
	ibm_read_slot_reset_state	= rtas_token("ibm,read-slot-reset-state");
	ibm_slot_error_detail		= rtas_token("ibm,slot-error-detail");
	ibm_get_config_addr_info2	= rtas_token("ibm,get-config-addr-info2");
	ibm_get_config_addr_info	= rtas_token("ibm,get-config-addr-info");
	ibm_configure_pe		= rtas_token("ibm,configure-pe");
	ibm_configure_bridge		= rtas_token("ibm,configure-bridge");

	/*
	 * Necessary sanity check. We needn't check "get-config-addr-info"
	 * and its variant since old firmware probably supports the address
	 * of domain/bus/slot/function for EEH RTAS operations.
	 */
	if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE		||
	    ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE		||
	    (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
	     ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE)	||
	    ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE	||
	    (ibm_configure_pe == RTAS_UNKNOWN_SERVICE		&&
	     ibm_configure_bridge == RTAS_UNKNOWN_SERVICE)) {
		pr_info("EEH functionality not supported\n");
		return -EINVAL;
	}

	/* Initialize error log lock and size */
	spin_lock_init(&slot_errbuf_lock);
	eeh_error_buf_size = rtas_token("rtas-error-log-max");
	if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
		pr_info("%s: unknown EEH error log size\n",
			__func__);
		eeh_error_buf_size = 1024;
	} else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
		pr_info("%s: EEH error log size %d exceeds the maximal %d\n",
			__func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
		eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
	}

	/* Set EEH probe mode */
	eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG);

	return 0;
}

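/**
 * pseries_eeh_cap_start - Check for a standard PCI capability list
 * @dn: device node
 *
 * Read PCI_STATUS through RTAS. If the device advertises a capability
 * list, return the offset of the capability list pointer
 * (PCI_CAPABILITY_LIST); otherwise return 0.
 */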
static int pseries_eeh_cap_start(struct device_node *dn)
{
	struct pci_dn *pdn = PCI_DN(dn);
	u32 status;

	if (!pdn)
		return 0;

	rtas_read_config(pdn, PCI_STATUS, 2, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	return PCI_CAPABILITY_LIST;
}


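/**
 * pseries_eeh_find_cap - Find a standard PCI capability
 * @dn: device node
 * @cap: capability ID to look for (PCI_CAP_ID_*)
 *
 * Walk the standard capability list through RTAS config reads and
 * return the config-space offset of the requested capability, or 0
 * if it isn't present. The walk is bounded to 48 entries to guard
 * against malformed lists.
 */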
static int pseries_eeh_find_cap(struct device_node *dn, int cap)
{
	struct pci_dn *pdn = PCI_DN(dn);
	int pos = pseries_eeh_cap_start(dn);
	int cnt = 48;	/* Maximal number of capabilities */
	u32 id;

	if (!pos)
		return 0;

	while (cnt--) {
		rtas_read_config(pdn, pos, 1, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		rtas_read_config(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}

	return 0;
}

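/**
 * pseries_eeh_find_ecap - Find a PCI Express extended capability
 * @dn: device node
 * @cap: extended capability ID to look for (PCI_EXT_CAP_ID_*)
 *
 * Walk the PCIe extended capability list, which starts at config
 * offset 256, through RTAS config reads. Return the offset of the
 * requested capability, or 0 if the device has no PCIe capability
 * or the extended capability isn't found.
 */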
static int pseries_eeh_find_ecap(struct device_node *dn, int cap)
{
	struct pci_dn *pdn = PCI_DN(dn);
	struct eeh_dev *edev = of_node_to_eeh_dev(dn);
	u32 header;
	int pos = 256;
	int ttl = (4096 - 256) / 8;

	if (!edev || !edev->pcie_cap)
		return 0;
	if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
		return 0;
	else if (!header)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < 256)
			break;

		if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}

/**
 * pseries_eeh_of_probe - EEH probe on the given device
 * @dn: OF node
 * @flag: Unused
 *
 * When the EEH module is installed during system boot, all PCI devices
 * are checked one by one to see if they support EEH. This function
 * is introduced for that purpose.
 */
static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
{
	struct eeh_dev *edev;
	struct eeh_pe pe;
	struct pci_dn *pdn = PCI_DN(dn);
	const __be32 *classp, *vendorp, *devicep;
	u32 class_code;
	const __be32 *regs;
	u32 pcie_flags;
	int enable = 0;
	int ret;

	/* Retrieve OF node and eeh device */
	edev = of_node_to_eeh_dev(dn);
	if (edev->pe || !of_device_is_available(dn))
		return NULL;

	/* Retrieve class/vendor/device IDs */
	classp = of_get_property(dn, "class-code", NULL);
	vendorp = of_get_property(dn, "vendor-id", NULL);
	devicep = of_get_property(dn, "device-id", NULL);

	/* Skip for bad OF node or PCI-ISA bridge */
	if (!classp || !vendorp || !devicep)
		return NULL;
	if (dn->type && !strcmp(dn->type, "isa"))
		return NULL;

	class_code = of_read_number(classp, 1);

	/*
	 * Update class code and mode of eeh device. We need to
	 * correctly reflect whether the current device is a root port
	 * or a PCIe switch downstream port.
	 */
	edev->class_code = class_code;
	edev->pcix_cap = pseries_eeh_find_cap(dn, PCI_CAP_ID_PCIX);
	edev->pcie_cap = pseries_eeh_find_cap(dn, PCI_CAP_ID_EXP);
	edev->aer_cap = pseries_eeh_find_ecap(dn, PCI_EXT_CAP_ID_ERR);
	edev->mode &= 0xFFFFFF00;
	if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
		edev->mode |= EEH_DEV_BRIDGE;
		if (edev->pcie_cap) {
			rtas_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
					 2, &pcie_flags);
			pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
			if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
				edev->mode |= EEH_DEV_ROOT_PORT;
			else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
				edev->mode |= EEH_DEV_DS_PORT;
		}
	}

	/* Retrieve the device address */
	regs = of_get_property(dn, "reg", NULL);
	if (!regs) {
		pr_warn("%s: OF node property %s::reg not found\n",
			__func__, dn->full_name);
		return NULL;
	}

	/* Initialize the fake PE */
	memset(&pe, 0, sizeof(struct eeh_pe));
	pe.phb = edev->phb;
	pe.config_addr = of_read_number(regs, 1);

	/* Enable EEH on the device */
	ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
	if (!ret) {
		edev->config_addr = of_read_number(regs, 1);
		/* Retrieve PE address */
		edev->pe_config_addr = eeh_ops->get_pe_addr(&pe);
		pe.addr = edev->pe_config_addr;

		/* Some older systems (Power4) allow the ibm,set-eeh-option
		 * call to succeed even on nodes where EEH is not supported.
		 * Verify support explicitly.
		 */
		ret = eeh_ops->get_state(&pe, NULL);
		if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT)
			enable = 1;

		if (enable) {
			eeh_add_flag(EEH_ENABLED);
			eeh_add_to_parent_pe(edev);

			pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n",
				__func__, dn->full_name, pe.phb->global_number,
				pe.addr, pe.config_addr);
		} else if (dn->parent && of_node_to_eeh_dev(dn->parent) &&
			   (of_node_to_eeh_dev(dn->parent))->pe) {
			/* This device doesn't support EEH, but it may have an
			 * EEH parent, in which case we mark it as supported.
			 */
			edev->config_addr = of_node_to_eeh_dev(dn->parent)->config_addr;
			edev->pe_config_addr = of_node_to_eeh_dev(dn->parent)->pe_config_addr;
			eeh_add_to_parent_pe(edev);
		}
	}

	/* Save memory bars */
	eeh_save_bars(edev);

	return NULL;
}

/**
 * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable
 * @pe: EEH PE
 * @option: operation to be issued
 *
 * The function is used to control the EEH functionality globally.
 * Currently, the following options are supported according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
 */
static int pseries_eeh_set_option(struct eeh_pe *pe, int option)
{
	int ret = 0;
	int config_addr;

	/*
	 * When we're enabling or disabling EEH functionality on
	 * the particular PE, the PE config address is possibly
	 * unavailable. Therefore, we have to figure it out from
	 * the FDT node.
	 */
	switch (option) {
	case EEH_OPT_DISABLE:
	case EEH_OPT_ENABLE:
	case EEH_OPT_THAW_MMIO:
	case EEH_OPT_THAW_DMA:
		config_addr = pe->config_addr;
		if (pe->addr)
			config_addr = pe->addr;
		break;
	case EEH_OPT_FREEZE_PE:
		/* Not supported */
		return 0;
	default:
		pr_err("%s: Invalid option %d\n",
			__func__, option);
		return -EINVAL;
	}

	ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
			config_addr, BUID_HI(pe->phb->buid),
			BUID_LO(pe->phb->buid), option);

	return ret;
}

/**
 * pseries_eeh_get_pe_addr - Retrieve PE address
 * @pe: EEH PE
 *
 * Retrieve the associated PE address. There are actually 2 RTAS
 * function calls dedicated to the purpose, so we try the new one
 * first and then fall back to the old one. Besides, you should
 * make sure the config address has been figured out from the
 * FDT node before calling the function.
 *
 * Note that a zeroed return value means an invalid PE config
 * address.
 */
static int pseries_eeh_get_pe_addr(struct eeh_pe *pe)
{
	int ret = 0;
	int rets[3];

	if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
		/*
		 * First of all, we need to make sure there is one PE
		 * associated with the device. Otherwise, PE address is
		 * meaningless.
		 */
		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 1);
		if (ret || (rets[0] == 0))
			return 0;

		/* Retrieve the associated PE config address */
		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 0);
		if (ret) {
			pr_warn("%s: Failed to get address for PHB#%d-PE#%x\n",
				__func__, pe->phb->global_number, pe->config_addr);
			return 0;
		}

		return rets[0];
	}

	if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets,
				pe->config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), 0);
		if (ret) {
			pr_warn("%s: Failed to get address for PHB#%d-PE#%x\n",
				__func__, pe->phb->global_number, pe->config_addr);
			return 0;
		}

		return rets[0];
	}

	return ret;
}

/**
 * pseries_eeh_get_state - Retrieve PE state
 * @pe: EEH PE
 * @state: return value
 *
 * Retrieve the state of the specified PE. On an RTAS-compliant
 * pseries platform, there is already one dedicated RTAS function
 * for the purpose. It's notable that the associated PE config address
 * might be ready when calling the function. Therefore, endeavour to
 * use the PE config address if possible. Furthermore, there are 2
 * RTAS calls for the purpose; we need to try the new one and fall back
 * to the old one if the new one doesn't work properly.
 */
static int pseries_eeh_get_state(struct eeh_pe *pe, int *state)
{
	int config_addr;
	int ret;
	int rets[4];
	int result;

	/* Figure out PE config address if possible */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) {
		/* Fake PE unavailable info */
		rets[2] = 0;
		ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else {
		return EEH_STATE_NOT_SUPPORT;
	}

	if (ret)
		return ret;

	/* Parse the result out */
	result = 0;
	if (rets[1]) {
		switch (rets[0]) {
		case 0:
			/* Reset deactivated, MMIO and DMA enabled */
			result &= ~EEH_STATE_RESET_ACTIVE;
			result |= EEH_STATE_MMIO_ACTIVE;
			result |= EEH_STATE_DMA_ACTIVE;
			break;
		case 1:
			/* Reset activated, MMIO and DMA enabled */
			result |= EEH_STATE_RESET_ACTIVE;
			result |= EEH_STATE_MMIO_ACTIVE;
			result |= EEH_STATE_DMA_ACTIVE;
			break;
		case 2:
			/* PE frozen: MMIO and DMA disabled */
			result &= ~EEH_STATE_RESET_ACTIVE;
			result &= ~EEH_STATE_MMIO_ACTIVE;
			result &= ~EEH_STATE_DMA_ACTIVE;
			break;
		case 4:
			/* PE frozen, but MMIO enabled */
			result &= ~EEH_STATE_RESET_ACTIVE;
			result &= ~EEH_STATE_MMIO_ACTIVE;
			result &= ~EEH_STATE_DMA_ACTIVE;
			result |= EEH_STATE_MMIO_ENABLED;
			break;
		case 5:
			/* PE state temporarily unavailable */
			if (rets[2]) {
				if (state) *state = rets[2];
				result = EEH_STATE_UNAVAILABLE;
			} else {
				result = EEH_STATE_NOT_SUPPORT;
			}
			break;
		default:
			result = EEH_STATE_NOT_SUPPORT;
		}
	} else {
		result = EEH_STATE_NOT_SUPPORT;
	}

	return result;
}

/**
 * pseries_eeh_reset - Reset the specified PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Reset the specified PE
 */
static int pseries_eeh_reset(struct eeh_pe *pe, int option)
{
	int config_addr;
	int ret;

	/* Figure out PE address */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	/* Reset PE through RTAS call */
	ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
			config_addr, BUID_HI(pe->phb->buid),
			BUID_LO(pe->phb->buid), option);

	/* If fundamental-reset not supported, try hot-reset */
	if (option == EEH_RESET_FUNDAMENTAL &&
	    ret == -8) {
		option = EEH_RESET_HOT;
		ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid), option);
	}

	/* We need reset hold or settlement delay */
	if (option == EEH_RESET_FUNDAMENTAL ||
	    option == EEH_RESET_HOT)
		msleep(EEH_PE_RST_HOLD_TIME);
	else
		msleep(EEH_PE_RST_SETTLE_TIME);

	return ret;
}

/**
 * pseries_eeh_wait_state - Wait for PE state
 * @pe: EEH PE
 * @max_wait: maximal period in milliseconds
 *
 * Wait for the state of the associated PE. It might take some time
 * to retrieve the PE's state.
 */
static int pseries_eeh_wait_state(struct eeh_pe *pe, int max_wait)
{
	int ret;
	int mwait;

	/*
	 * According to PAPR, the state of a PE might be temporarily
	 * unavailable. In that case, we have to wait for the time
	 * indicated by the firmware. The maximal wait time is 5
	 * minutes, which is taken from the original EEH
	 * implementation. The original implementation also defined
	 * the minimal wait time as 1 second.
	 */
#define EEH_STATE_MIN_WAIT_TIME	(1000)
#define EEH_STATE_MAX_WAIT_TIME	(300 * 1000)

	while (1) {
		ret = pseries_eeh_get_state(pe, &mwait);

		/*
		 * If the PE's state is temporarily unavailable,
		 * we have to wait for the specified time. Otherwise,
		 * the PE's state will be returned immediately.
		 */
		if (ret != EEH_STATE_UNAVAILABLE)
			return ret;

		if (max_wait <= 0) {
			pr_warn("%s: Timeout when getting PE's state (%d)\n",
				__func__, max_wait);
			return EEH_STATE_NOT_SUPPORT;
		}

		if (mwait <= 0) {
			pr_warn("%s: Firmware returned bad wait value %d\n",
				__func__, mwait);
			mwait = EEH_STATE_MIN_WAIT_TIME;
		} else if (mwait > EEH_STATE_MAX_WAIT_TIME) {
			pr_warn("%s: Firmware returned too long wait value %d\n",
				__func__, mwait);
			mwait = EEH_STATE_MAX_WAIT_TIME;
		}

		max_wait -= mwait;
		msleep(mwait);
	}

	return EEH_STATE_NOT_SUPPORT;
}

/**
 * pseries_eeh_get_log - Retrieve error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log
 * @drv_log: driver log to be combined with retrieved error log
 * @len: length of driver log
 *
 * Retrieve the temporary or permanent error log from the PE.
 * Actually, the error will be retrieved through the dedicated
 * RTAS call.
 */
static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len)
{
	int config_addr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&slot_errbuf_lock, flags);
	memset(slot_errbuf, 0, eeh_error_buf_size);

	/* Figure out the PE address */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr,
			BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid),
			virt_to_phys(drv_log), len,
			virt_to_phys(slot_errbuf), eeh_error_buf_size,
			severity);
	if (!ret)
		log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
	spin_unlock_irqrestore(&slot_errbuf_lock, flags);

	return ret;
}

/**
 * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * @pe: EEH PE
 *
 * The function will be called to reconfigure the bridges included
 * in the specified PE so that the malfunctioning PE can be recovered
 * again.
 */
static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
{
	int config_addr;
	int ret;

	/* Figure out the PE address */
	config_addr = pe->config_addr;
	if (pe->addr)
		config_addr = pe->addr;

	/* Use new configure-pe function, if supported */
	if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
				config_addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else {
		return -EFAULT;
	}

	if (ret)
		pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
			__func__, pe->phb->global_number, pe->addr, ret);

	return ret;
}

/**
 * pseries_eeh_read_config - Read PCI config space
 * @dn: device node
 * @where: PCI address
 * @size: size to read
 * @val: return value
 *
 * Read config space from the specified device
 */
static int pseries_eeh_read_config(struct device_node *dn, int where, int size, u32 *val)
{
	struct pci_dn *pdn;

	pdn = PCI_DN(dn);

	return rtas_read_config(pdn, where, size, val);
}

/**
 * pseries_eeh_write_config - Write PCI config space
 * @dn: device node
 * @where: PCI address
 * @size: size to write
 * @val: value to be written
 *
 * Write config space to the specified device
 */
static int pseries_eeh_write_config(struct device_node *dn, int where, int size, u32 val)
{
	struct pci_dn *pdn;

	pdn = PCI_DN(dn);

	return rtas_write_config(pdn, where, size, val);
}

static struct eeh_ops pseries_eeh_ops = {
	.name			= "pseries",
	.init			= pseries_eeh_init,
	.of_probe		= pseries_eeh_of_probe,
	.dev_probe		= NULL,
	.set_option		= pseries_eeh_set_option,
	.get_pe_addr		= pseries_eeh_get_pe_addr,
	.get_state		= pseries_eeh_get_state,
	.reset			= pseries_eeh_reset,
	.wait_state		= pseries_eeh_wait_state,
	.get_log		= pseries_eeh_get_log,
	.configure_bridge	= pseries_eeh_configure_bridge,
	.err_inject		= NULL,
	.read_config		= pseries_eeh_read_config,
	.write_config		= pseries_eeh_write_config,
	.next_error		= NULL,
	.restore_config		= NULL
};

/**
 * eeh_pseries_init - Register platform dependent EEH operations
 *
 * EEH initialization on pseries platform. This function should be
 * called before any EEH related functions.
 */
static int __init eeh_pseries_init(void)
{
	int ret;

	ret = eeh_ops_register(&pseries_eeh_ops);
	if (!ret)
		pr_info("EEH: pSeries platform initialized\n");
	else
		pr_info("EEH: pSeries platform initialization failure (%d)\n",
			ret);

	return ret;
}
machine_early_initcall(pseries, eeh_pseries_init);