Commit b7c8c194 authored by Linus Torvalds

Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

Pull more powerpc updates from Ben Herrenschmidt:
 "Here are the remaining bits I was mentioning earlier.  Mostly bug
  fixes and new selftests from Michael (yay !).  He also removed the WSP
  platform and A2 core support which were dead before release, so less
  clutter.

  One little "feature" I snuck in is the doorbell IPI support for
  non-virtualized P8 which speeds up IPIs significantly between threads
  of a core"

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (34 commits)
  powerpc/book3s: Fix some ABIv2 issues in machine check code
  powerpc/book3s: Fix guest MC delivery mechanism to avoid soft lockups in guest.
  powerpc/book3s: Increment the mce counter during machine_check_early call.
  powerpc/book3s: Add stack overflow check in machine check handler.
  powerpc/book3s: Fix machine check handling for unhandled errors
  powerpc/eeh: Dump PE location code
  powerpc/powernv: Enable POWER8 doorbell IPIs
  powerpc/cpuidle: Only clear LPCR decrementer wakeup bit on fast sleep entry
  powerpc/powernv: Fix killed EEH event
  powerpc: fix typo 'CONFIG_PMAC'
  powerpc: fix typo 'CONFIG_PPC_CPU'
  powerpc/powernv: Don't escalate non-existing frozen PE
  powerpc/eeh: Report frozen parent PE prior to child PE
  powerpc/eeh: Clear frozen state for child PE
  powerpc/powernv: Reduce panic timeout from 180s to 10s
  powerpc/xmon: avoid format string leaking to printk
  selftests/powerpc: Add tests of PMU EBBs
  selftests/powerpc: Add support for skipping tests
  selftests/powerpc: Put the test in a separate process group
  selftests/powerpc: Fix instruction loop for ABIv2 (LE)
  ...
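
The doorbell IPI change called out in the pull message works roughly as follows: on non-virtualized (bare-metal) POWER8, a thread can interrupt a sibling thread of its own core with a msgsnd doorbell instead of taking the round trip through the XICS interrupt controller, and the LPCR[PECEDH] bit set in the cpu_setup hunk further down lets such doorbells wake a thread from power-save states. A minimal sketch of the resulting IPI dispatch, assuming the helper names from the xics/dbell code of this era (illustrative wiring, not verbatim from the commit):

static void cause_ipi(int cpu, unsigned long data)
{
	if (cpu_has_feature(CPU_FTR_DBELL) &&
	    cpumask_test_cpu(cpu, cpu_sibling_mask(smp_processor_id())))
		doorbell_cause_ipi(cpu, data);	/* msgsnd to a thread of this core */
	else
		icp_native_set_qirr(cpu, IPI_PRIORITY);	/* normal XICS IPI */
}

Off-core targets still go through XICS; only intra-core IPIs take the fast path, which is why the speedup is "between threads of a core".
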
......@@ -235,11 +235,6 @@ config PPC_EARLY_DEBUG_USBGECKO
Select this to enable early debugging for Nintendo GameCube/Wii
consoles via an external USB Gecko adapter.
config PPC_EARLY_DEBUG_WSP
bool "Early debugging via WSP's internal UART"
depends on PPC_WSP
select PPC_UDBG_16550
config PPC_EARLY_DEBUG_PS3GELIC
bool "Early debugging through the PS3 Ethernet port"
depends on PPC_PS3
......
CONFIG_PPC64=y
CONFIG_PPC_BOOK3E_64=y
# CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set
CONFIG_SMP=y
CONFIG_NR_CPUS=256
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_AUDIT=y
CONFIG_AUDITSYSCALL=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=19
CONFIG_CGROUPS=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
CONFIG_CGROUP_MEMCG=y
CONFIG_CGROUP_MEMCG_SWAP=y
CONFIG_NAMESPACES=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_BZIP2=y
CONFIG_RD_LZMA=y
CONFIG_INITRAMFS_COMPRESSION_GZIP=y
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_SCOM_DEBUGFS=y
CONFIG_PPC_A2_DD2=y
CONFIG_KVM_GUEST=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_100=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=y
CONFIG_NUMA=y
# CONFIG_MIGRATION is not set
CONFIG_PPC_64K_PAGES=y
CONFIG_SCHED_SMT=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE=""
# CONFIG_SECCOMP is not set
CONFIG_PCIEPORTBUS=y
# CONFIG_PCIEASPM is not set
CONFIG_PCI_MSI=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=m
CONFIG_XFRM_SUB_POLICY=y
CONFIG_XFRM_STATISTICS=y
CONFIG_NET_KEY=m
CONFIG_NET_KEY_MIGRATE=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_NET_IPIP=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
CONFIG_IPV6=y
CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_IPV6_OPTIMISTIC_DAD=y
CONFIG_INET6_AH=y
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=y
CONFIG_IPV6_TUNNEL=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_IPV6_MROUTE=y
CONFIG_IPV6_PIMSM_V2=y
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_IRC=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
CONFIG_NETFILTER_XT_MATCH_SCTP=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_NET_TCPPROBE=y
# CONFIG_WIRELESS is not set
CONFIG_NET_9P=y
CONFIG_NET_9P_DEBUG=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_MTD=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_ADV_OPTIONS=y
CONFIG_MTD_CFI_LE_BYTE_SWAP=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_CFI_STAA=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=y
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_CDROM_PKTCDVD=y
CONFIG_MISC_DEVICES=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SPI_ATTRS=y
CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_ISCSI_ATTRS=m
CONFIG_SCSI_SAS_ATTRS=m
CONFIG_SCSI_SRP_ATTRS=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_SIL24=y
CONFIG_SATA_MV=y
CONFIG_SATA_SIL=y
CONFIG_PATA_CMD64X=y
CONFIG_PATA_MARVELL=y
CONFIG_PATA_SIL680=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_SNAPSHOT=y
CONFIG_DM_MIRROR=y
CONFIG_DM_ZERO=y
CONFIG_DM_UEVENT=y
CONFIG_NETDEVICES=y
CONFIG_TUN=y
CONFIG_E1000E=y
CONFIG_TIGON3=y
# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_RAW_DRIVER=y
CONFIG_MAX_RAW_DEVS=1024
# CONFIG_HWMON is not set
# CONFIG_VGA_ARB is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1511=y
CONFIG_RTC_DRV_DS1553=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT2_FS_XIP=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
CONFIG_EXT4_FS=y
# CONFIG_DNOTIFY is not set
CONFIG_FUSE_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_CONFIGFS_FS=m
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_NFS_V4_1=y
CONFIG_ROOT_NFS=y
CONFIG_CIFS=y
CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_CRC_CCITT=m
CONFIG_CRC_T10DIF=y
CONFIG_LIBCRC32C=m
CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_DEBUG_INFO=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_PPC_EMULATED_STATS=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_PPC_EARLY_DEBUG=y
CONFIG_KEYS_DEBUG_PROC_KEYS=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_VIRTUALIZATION=y
......@@ -489,7 +489,6 @@ typedef struct scc_trans {
#define FCC_GFMR_TCI ((uint)0x20000000)
#define FCC_GFMR_TRX ((uint)0x10000000)
#define FCC_GFMR_TTX ((uint)0x08000000)
#define FCC_GFMR_CDP ((uint)0x04000000)
#define FCC_GFMR_CTSP ((uint)0x02000000)
#define FCC_GFMR_CDS ((uint)0x01000000)
......
......@@ -254,6 +254,7 @@ void *eeh_pe_traverse(struct eeh_pe *root,
void *eeh_pe_dev_traverse(struct eeh_pe *root,
eeh_traverse_func fn, void *flag);
void eeh_pe_restore_bars(struct eeh_pe *pe);
const char *eeh_pe_loc_get(struct eeh_pe *pe);
struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe);
void *eeh_dev_init(struct device_node *dn, void *data);
......
......@@ -33,7 +33,7 @@ struct eeh_event {
int eeh_event_init(void);
int eeh_send_failure_event(struct eeh_pe *pe);
void eeh_remove_event(struct eeh_pe *pe);
void eeh_remove_event(struct eeh_pe *pe, bool force);
void eeh_handle_event(struct eeh_pe *pe);
#endif /* __KERNEL__ */
......
......@@ -223,10 +223,6 @@ typedef struct {
unsigned int id;
unsigned int active;
unsigned long vdso_base;
#ifdef CONFIG_PPC_ICSWX
struct spinlock *cop_lockp; /* guard cop related stuff */
unsigned long acop; /* mask of enabled coprocessor types */
#endif /* CONFIG_PPC_ICSWX */
#ifdef CONFIG_PPC_MM_SLICES
u64 low_slices_psize; /* SLB page size encodings */
u64 high_slices_psize; /* 4 bits per slice for now */
......
......@@ -599,9 +599,9 @@ enum {
};
struct OpalIoPhbErrorCommon {
uint32_t version;
uint32_t ioType;
uint32_t len;
__be32 version;
__be32 ioType;
__be32 len;
};
struct OpalIoP7IOCPhbErrorData {
......@@ -666,64 +666,64 @@ struct OpalIoP7IOCPhbErrorData {
struct OpalIoPhb3ErrorData {
struct OpalIoPhbErrorCommon common;
uint32_t brdgCtl;
__be32 brdgCtl;
/* PHB3 UTL regs */
uint32_t portStatusReg;
uint32_t rootCmplxStatus;
uint32_t busAgentStatus;
__be32 portStatusReg;
__be32 rootCmplxStatus;
__be32 busAgentStatus;
/* PHB3 cfg regs */
uint32_t deviceStatus;
uint32_t slotStatus;
uint32_t linkStatus;
uint32_t devCmdStatus;
uint32_t devSecStatus;
__be32 deviceStatus;
__be32 slotStatus;
__be32 linkStatus;
__be32 devCmdStatus;
__be32 devSecStatus;
/* cfg AER regs */
uint32_t rootErrorStatus;
uint32_t uncorrErrorStatus;
uint32_t corrErrorStatus;
uint32_t tlpHdr1;
uint32_t tlpHdr2;
uint32_t tlpHdr3;
uint32_t tlpHdr4;
uint32_t sourceId;
__be32 rootErrorStatus;
__be32 uncorrErrorStatus;
__be32 corrErrorStatus;
__be32 tlpHdr1;
__be32 tlpHdr2;
__be32 tlpHdr3;
__be32 tlpHdr4;
__be32 sourceId;
uint32_t rsv3;
__be32 rsv3;
/* Record data about the call to allocate a buffer */
uint64_t errorClass;
uint64_t correlator;
__be64 errorClass;
__be64 correlator;
uint64_t nFir; /* 000 */
uint64_t nFirMask; /* 003 */
uint64_t nFirWOF; /* 008 */
__be64 nFir; /* 000 */
__be64 nFirMask; /* 003 */
__be64 nFirWOF; /* 008 */
/* PHB3 MMIO Error Regs */
uint64_t phbPlssr; /* 120 */
uint64_t phbCsr; /* 110 */
uint64_t lemFir; /* C00 */
uint64_t lemErrorMask; /* C18 */
uint64_t lemWOF; /* C40 */
uint64_t phbErrorStatus; /* C80 */
uint64_t phbFirstErrorStatus; /* C88 */
uint64_t phbErrorLog0; /* CC0 */
uint64_t phbErrorLog1; /* CC8 */
uint64_t mmioErrorStatus; /* D00 */
uint64_t mmioFirstErrorStatus; /* D08 */
uint64_t mmioErrorLog0; /* D40 */
uint64_t mmioErrorLog1; /* D48 */
uint64_t dma0ErrorStatus; /* D80 */
uint64_t dma0FirstErrorStatus; /* D88 */
uint64_t dma0ErrorLog0; /* DC0 */
uint64_t dma0ErrorLog1; /* DC8 */
uint64_t dma1ErrorStatus; /* E00 */
uint64_t dma1FirstErrorStatus; /* E08 */
uint64_t dma1ErrorLog0; /* E40 */
uint64_t dma1ErrorLog1; /* E48 */
uint64_t pestA[OPAL_PHB3_NUM_PEST_REGS];
uint64_t pestB[OPAL_PHB3_NUM_PEST_REGS];
__be64 phbPlssr; /* 120 */
__be64 phbCsr; /* 110 */
__be64 lemFir; /* C00 */
__be64 lemErrorMask; /* C18 */
__be64 lemWOF; /* C40 */
__be64 phbErrorStatus; /* C80 */
__be64 phbFirstErrorStatus; /* C88 */
__be64 phbErrorLog0; /* CC0 */
__be64 phbErrorLog1; /* CC8 */
__be64 mmioErrorStatus; /* D00 */
__be64 mmioFirstErrorStatus; /* D08 */
__be64 mmioErrorLog0; /* D40 */
__be64 mmioErrorLog1; /* D48 */
__be64 dma0ErrorStatus; /* D80 */
__be64 dma0FirstErrorStatus; /* D88 */
__be64 dma0ErrorLog0; /* DC0 */
__be64 dma0ErrorLog1; /* DC8 */
__be64 dma1ErrorStatus; /* E00 */
__be64 dma1FirstErrorStatus; /* E08 */
__be64 dma1ErrorLog0; /* E40 */
__be64 dma1ErrorLog1; /* E48 */
__be64 pestA[OPAL_PHB3_NUM_PEST_REGS];
__be64 pestB[OPAL_PHB3_NUM_PEST_REGS];
};
enum {
......@@ -851,8 +851,8 @@ int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t erro
int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action);
int64_t opal_get_epow_status(__be64 *status);
int64_t opal_set_system_attention_led(uint8_t led_action);
int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
uint16_t *pci_error_type, uint16_t *severity);
int64_t opal_pci_next_error(uint64_t phb_id, __be64 *first_frozen_pe,
__be16 *pci_error_type, __be16 *severity);
int64_t opal_pci_poll(uint64_t phb_id);
int64_t opal_return_cpu(void);
int64_t opal_reinit_cpus(uint64_t flags);
......
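
The uint32_t/uint64_t to __be32/__be64 conversions in this header are the heart of several hunks below: OPAL firmware fills these error buffers (and out-parameters such as opal_pci_next_error()'s) in big-endian byte order regardless of the kernel's endianness, so a little-endian host must swap every field on access, and the __be types let sparse flag any access that forgets to. A small, runnable userspace analogue of what be32_to_cpu() does (htobe32/be32toh stand in for the kernel helpers):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Fields are stored big-endian, like the kernel's __be32. */
struct phb_err_common {
	uint32_t version;
	uint32_t ioType;
	uint32_t len;
};

int main(void)
{
	struct phb_err_common c = { htobe32(1), htobe32(2), htobe32(64) };

	/* Reading the raw field on a little-endian host gives nonsense... */
	printf("raw:     %u\n", (unsigned)c.version);
	/* ...so every consumer swaps on access, as the hunks below add
	 * with be32_to_cpu()/be64_to_cpu(). */
	printf("swapped: %u\n", (unsigned)be32toh(c.version));
	return 0;
}
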
......@@ -110,15 +110,6 @@
#define TLB1_UR ASM_CONST(0x0000000000000002)
#define TLB1_SR ASM_CONST(0x0000000000000001)
#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
#define WSP_UART_PHYS 0xffc000c000
/* This needs to be carefully chosen to hit a !0 congruence class
* in the TLB since we bolt it in way 3, which is already occupied
* by our linear mapping primary bolted entry in CC 0.
*/
#define WSP_UART_VIRT 0xf000000000001000
#endif
/* A2 erativax attributes definitions */
#define ERATIVAX_RS_IS_ALL 0x000
#define ERATIVAX_RS_IS_TID 0x040
......
......@@ -16,13 +16,15 @@ struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
struct thread_struct *next);
#ifdef CONFIG_PPC_BOOK3S_64
static inline void save_tar(struct thread_struct *prev)
static inline void save_early_sprs(struct thread_struct *prev)
{
if (cpu_has_feature(CPU_FTR_ARCH_207S))
prev->tar = mfspr(SPRN_TAR);
if (cpu_has_feature(CPU_FTR_DSCR))
prev->dscr = mfspr(SPRN_DSCR);
}
#else
static inline void save_tar(struct thread_struct *prev) {}
static inline void save_early_sprs(struct thread_struct *prev) {}
#endif
extern void enable_kernel_fp(void);
......@@ -84,6 +86,8 @@ static inline void clear_task_ebb(struct task_struct *t)
{
#ifdef CONFIG_PPC_BOOK3S_64
/* EBB perf events are not inherited, so clear all EBB state. */
t->thread.ebbrr = 0;
t->thread.ebbhr = 0;
t->thread.bescr = 0;
t->thread.mmcr2 = 0;
t->thread.mmcr0 = 0;
......
/*
* Copyright 2011 Michael Ellerman, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef __ASM_POWERPC_WSP_H
#define __ASM_POWERPC_WSP_H
extern int wsp_get_chip_id(struct device_node *dn);
#endif /* __ASM_POWERPC_WSP_H */
......@@ -41,5 +41,6 @@
#define PPC_FEATURE2_EBB 0x10000000
#define PPC_FEATURE2_ISEL 0x08000000
#define PPC_FEATURE2_TAR 0x04000000
#define PPC_FEATURE2_VEC_CRYPTO 0x02000000
#endif /* _UAPI__ASM_POWERPC_CPUTABLE_H */
......@@ -43,7 +43,6 @@ obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
obj64-$(CONFIG_RELOCATABLE) += reloc_64.o
obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
obj-$(CONFIG_PPC_A2) += cpu_setup_a2.o
obj-$(CONFIG_PPC64) += vdso64/
obj-$(CONFIG_ALTIVEC) += vecemu.o
obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
......
/*
* A2 specific assembly support code
*
* Copyright 2009 Ben Herrenschmidt, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/asm-offsets.h>
#include <asm/ppc_asm.h>
#include <asm/ppc-opcode.h>
#include <asm/processor.h>
#include <asm/reg_a2.h>
#include <asm/reg.h>
#include <asm/thread_info.h>
/*
* Disable thdid and class fields in ERATs to bump PID to its full 14-bit capacity.
* This also prevents external LPID accesses but that isn't a problem when not a
* guest. Under PV, this setting will be ignored and MMUCR will return the right
* number of PID bits we can use.
*/
#define MMUCR1_EXTEND_PID \
(MMUCR1_ICTID | MMUCR1_ITTID | MMUCR1_DCTID | \
MMUCR1_DTTID | MMUCR1_DCCD)
/*
* Use extended PIDs if enabled.
* Don't clear the ERATs on context sync events and enable I & D LRU.
* Enable ERAT back invalidate when tlbwe overwrites an entry.
*/
#define INITIAL_MMUCR1 \
(MMUCR1_EXTEND_PID | MMUCR1_CSINV_NEVER | MMUCR1_IRRE | \
MMUCR1_DRRE | MMUCR1_TLBWE_BINV)
_GLOBAL(__setup_cpu_a2)
/* Some of these are actually thread local and some are
* core local but doing it always won't hurt
*/
#ifdef CONFIG_PPC_ICSWX
/* Make sure ACOP starts out as zero */
li r3,0
mtspr SPRN_ACOP,r3
/* Skip the following if we are in Guest mode */
mfmsr r3
andis. r0,r3,MSR_GS@h
bne _icswx_skip_guest
/* Enable icswx instruction */
mfspr r3,SPRN_A2_CCR2
ori r3,r3,A2_CCR2_ENABLE_ICSWX
mtspr SPRN_A2_CCR2,r3
/* Unmask all CTs in HACOP */
li r3,-1
mtspr SPRN_HACOP,r3
_icswx_skip_guest:
#endif /* CONFIG_PPC_ICSWX */
/* Enable doorbell */
mfspr r3,SPRN_A2_CCR2
oris r3,r3,A2_CCR2_ENABLE_PC@h
mtspr SPRN_A2_CCR2,r3
isync
/* Set up CCR0 to disable power saving for now as it's busted
* in the current implementations. Set up CCR1 to wake on
* interrupts normally (we write the default value but who
* knows what FW may have clobbered...)
*/
li r3,0
mtspr SPRN_A2_CCR0, r3
LOAD_REG_IMMEDIATE(r3,0x0f0f0f0f)
mtspr SPRN_A2_CCR1, r3
/* Initialise MMUCR1 */
lis r3,INITIAL_MMUCR1@h
ori r3,r3,INITIAL_MMUCR1@l
mtspr SPRN_MMUCR1,r3
/* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */
LOAD_REG_IMMEDIATE(r3, 0x000a7531)
mtspr SPRN_MMUCR2,r3
/* Set MMUCR3 to write all thids bit to the TLB */
LOAD_REG_IMMEDIATE(r3, 0x0000000f)
mtspr SPRN_MMUCR3,r3
/* Don't do ERAT stuff if running guest mode */
mfmsr r3
andis. r0,r3,MSR_GS@h
bne 1f
/* Now set the I-ERAT watermark to 15 */
lis r4,(MMUCR0_TLBSEL_I|MMUCR0_ECL)@h
mtspr SPRN_MMUCR0, r4
li r4,A2_IERAT_SIZE-1
PPC_ERATWE(R4,R4,3)
/* Now set the D-ERAT watermark to 31 */
lis r4,(MMUCR0_TLBSEL_D|MMUCR0_ECL)@h
mtspr SPRN_MMUCR0, r4
li r4,A2_DERAT_SIZE-1
PPC_ERATWE(R4,R4,3)
/* And invalidate the beast just in case. That won't get rid of
* a bolted entry though it will be in LRU and so will go away eventually
* but let's not bother for now
*/
PPC_ERATILX(0,0,R0)
1:
blr
_GLOBAL(__restore_cpu_a2)
b __setup_cpu_a2
......@@ -56,6 +56,7 @@ _GLOBAL(__setup_cpu_power8)
li r0,0
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
ori r3, r3, LPCR_PECEDH
bl __init_LPCR
bl __init_HFSCR
bl __init_tlb_power8
......@@ -74,6 +75,7 @@ _GLOBAL(__restore_cpu_power8)
li r0,0
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
ori r3, r3, LPCR_PECEDH
bl __init_LPCR
bl __init_HFSCR
bl __init_tlb_power8
......
......@@ -109,7 +109,8 @@ extern void __restore_cpu_e6500(void);
PPC_FEATURE_PSERIES_PERFMON_COMPAT)
#define COMMON_USER2_POWER8 (PPC_FEATURE2_ARCH_2_07 | \
PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_DSCR | \
PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR)
PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \
PPC_FEATURE2_VEC_CRYPTO)
#define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\
PPC_FEATURE_TRUE_LE | \
PPC_FEATURE_HAS_ALTIVEC_COMP)
......@@ -2148,44 +2149,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
}
#endif /* CONFIG_PPC32 */
#endif /* CONFIG_E500 */
#ifdef CONFIG_PPC_A2
{ /* Standard A2 (>= DD2) + FPU core */
.pvr_mask = 0xffff0000,
.pvr_value = 0x00480000,
.cpu_name = "A2 (>= DD2)",
.cpu_features = CPU_FTRS_A2,
.cpu_user_features = COMMON_USER_PPC64,
.mmu_features = MMU_FTRS_A2,
.icache_bsize = 64,
.dcache_bsize = 64,
.num_pmcs = 0,
.cpu_setup = __setup_cpu_a2,
.cpu_restore = __restore_cpu_a2,
.machine_check = machine_check_generic,
.platform = "ppca2",
},
{ /* This is a default entry to get going, to be replaced by
* a real one at some stage
*/
#define CPU_FTRS_BASE_BOOK3E (CPU_FTR_USE_TB | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_SMT | \
CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
.pvr_mask = 0x00000000,
.pvr_value = 0x00000000,
.cpu_name = "Book3E",
.cpu_features = CPU_FTRS_BASE_BOOK3E,
.cpu_user_features = COMMON_USER_PPC64,
.mmu_features = MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX |
MMU_FTR_USE_TLBIVAX_BCAST |
MMU_FTR_LOCK_BCAST_INVAL,
.icache_bsize = 64,
.dcache_bsize = 64,
.num_pmcs = 0,
.machine_check = machine_check_generic,
.platform = "power6",
},
#endif /* CONFIG_PPC_A2 */
};
static struct cpu_spec the_cpu_spec;
......
......@@ -330,8 +330,8 @@ static int eeh_phb_check_failure(struct eeh_pe *pe)
eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
eeh_serialize_unlock(flags);
pr_err("EEH: PHB#%x failure detected\n",
phb_pe->phb->global_number);
pr_err("EEH: PHB#%x failure detected, location: %s\n",
phb_pe->phb->global_number, eeh_pe_loc_get(phb_pe));
dump_stack();
eeh_send_failure_event(phb_pe);
......@@ -358,10 +358,11 @@ static int eeh_phb_check_failure(struct eeh_pe *pe)
int eeh_dev_check_failure(struct eeh_dev *edev)
{
int ret;
int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
unsigned long flags;
struct device_node *dn;
struct pci_dev *dev;
struct eeh_pe *pe;
struct eeh_pe *pe, *parent_pe, *phb_pe;
int rc = 0;
const char *location;
......@@ -439,14 +440,34 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
*/
if ((ret < 0) ||
(ret == EEH_STATE_NOT_SUPPORT) ||
(ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) ==
(EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) {
((ret & active_flags) == active_flags)) {
eeh_stats.false_positives++;
pe->false_positives++;
rc = 0;
goto dn_unlock;
}
/*
* It is a corner case, but the parent PE may have been put
* into the frozen state as well. Take care of that first.
*/
parent_pe = pe->parent;
while (parent_pe) {
/* Hit the ceiling ? */
if (parent_pe->type & EEH_PE_PHB)
break;
/* Frozen parent PE ? */
ret = eeh_ops->get_state(parent_pe, NULL);
if (ret > 0 &&
(ret & active_flags) != active_flags)
pe = parent_pe;
/* Next parent level */
parent_pe = parent_pe->parent;
}
eeh_stats.slot_resets++;
/* Avoid repeated reports of this failure, including problems
......@@ -460,8 +481,11 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
* a stack trace will help the device-driver authors figure
* out what happened. So print that out.
*/
pr_err("EEH: Frozen PE#%x detected on PHB#%x\n",
pe->addr, pe->phb->global_number);
phb_pe = eeh_phb_pe_get(pe->phb);
pr_err("EEH: Frozen PHB#%x-PE#%x detected\n",
pe->phb->global_number, pe->addr);
pr_err("EEH: PE location: %s, PHB location: %s\n",
eeh_pe_loc_get(pe), eeh_pe_loc_get(phb_pe));
dump_stack();
eeh_send_failure_event(pe);
......
......@@ -447,8 +447,9 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
* PE reset (for 3 times), we try to clear the frozen state
* for 3 times as well.
*/
static int eeh_clear_pe_frozen_state(struct eeh_pe *pe)
static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
{
struct eeh_pe *pe = (struct eeh_pe *)data;
int i, rc;
for (i = 0; i < 3; i++) {
......@@ -461,13 +462,24 @@ static int eeh_clear_pe_frozen_state(struct eeh_pe *pe)
}
/* The PE has been isolated, clear it */
if (rc)
if (rc) {
pr_warn("%s: Can't clear frozen PHB#%x-PE#%x (%d)\n",
__func__, pe->phb->global_number, pe->addr, rc);
else
return (void *)pe;
}
return NULL;
}
static int eeh_clear_pe_frozen_state(struct eeh_pe *pe)
{
void *rc;
rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, NULL);
if (!rc)
eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
return rc;
return rc ? -EIO : 0;
}
/**
......@@ -758,7 +770,7 @@ static void eeh_handle_special_event(void)
eeh_serialize_lock(&flags);
/* Purge all events */
eeh_remove_event(NULL);
eeh_remove_event(NULL, true);
list_for_each_entry(hose, &hose_list, list_node) {
phb_pe = eeh_phb_pe_get(hose);
......@@ -777,7 +789,7 @@ static void eeh_handle_special_event(void)
eeh_serialize_lock(&flags);
/* Purge all events of the PHB */
eeh_remove_event(pe);
eeh_remove_event(pe, true);
if (rc == EEH_NEXT_ERR_DEAD_PHB)
eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
......
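
The hunk above turns the unfreeze logic into an eeh_pe_traverse() callback so that the frozen state is cleared on the whole PE subtree, not just the top-level PE. The traversal contract this code relies on is sketched below (a simplification, not the kernel's exact implementation): visit the PE and all of its children, and abort the walk as soon as the callback returns non-NULL, propagating that value back as the failure.

#include <stddef.h>

struct pe {
	struct pe *child;
	struct pe *sibling;
};

typedef void *(*traverse_fn)(struct pe *pe, void *flag);

/* Depth-first walk; the first non-NULL return aborts the traversal
 * and is handed back to the caller as the failing PE. */
static void *pe_traverse(struct pe *pe, traverse_fn fn, void *flag)
{
	void *ret;

	for (; pe; pe = pe->sibling) {
		ret = fn(pe, flag);
		if (ret)
			return ret;
		ret = pe_traverse(pe->child, fn, flag);
		if (ret)
			return ret;
	}
	return NULL;		/* every PE in the subtree was handled */
}
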
......@@ -152,24 +152,33 @@ int eeh_send_failure_event(struct eeh_pe *pe)
/**
* eeh_remove_event - Remove EEH event from the queue
* @pe: Event binding to the PE
* @force: Event will be removed unconditionally
*
* On the PowerNV platform, subsequent events may actually be
* part of an earlier one. In that case the later events are
* pure duplicates and should be removed.
*/
void eeh_remove_event(struct eeh_pe *pe)
void eeh_remove_event(struct eeh_pe *pe, bool force)
{
unsigned long flags;
struct eeh_event *event, *tmp;
/*
* A NULL PE passed in means the IOC is dead, or the caller is
* sure it can report all existing errors itself.
*
* Unless "force" is set, events whose associated PE has already
* been isolated are kept on the queue so that no event is lost.
*/
spin_lock_irqsave(&eeh_eventlist_lock, flags);
list_for_each_entry_safe(event, tmp, &eeh_eventlist, list) {
/*
* If we don't have valid PE passed in, that means
* we already have event corresponding to dead IOC
* and all events should be purged.
*/
if (!force && event->pe &&
(event->pe->state & EEH_PE_ISOLATED))
continue;
if (!pe) {
list_del(&event->list);
kfree(event);
......
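
Condensed, the filter that the new `force` parameter implements is the predicate below (a sketch; the EEH_PE_ISOLATED value is illustrative): without force, an event whose PE is already isolated, i.e. already under recovery, stays on the queue so it is not lost; with force, every matching event is purged.

#define EEH_PE_ISOLATED	0x01		/* illustrative value */

struct eeh_pe { int state; };

/* Return true if the event must stay on the queue. */
static int keep_event(int force, struct eeh_pe *event_pe)
{
	return !force && event_pe && (event_pe->state & EEH_PE_ISOLATED);
}
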
......@@ -791,6 +791,66 @@ void eeh_pe_restore_bars(struct eeh_pe *pe)
eeh_pe_dev_traverse(pe, eeh_restore_one_device_bars, NULL);
}
/**
* eeh_pe_loc_get - Retrieve location code binding to the given PE
* @pe: EEH PE
*
* Retrieve the location code of the given PE. If the primary PE bus
* is the root bus, grab the location code from the PHB device-tree
* node or the root port. Otherwise, check the device-tree node of
* the upstream bridge of the primary PE bus for the location code.
*/
const char *eeh_pe_loc_get(struct eeh_pe *pe)
{
struct pci_controller *hose;
struct pci_bus *bus = eeh_pe_bus_get(pe);
struct pci_dev *pdev;
struct device_node *dn;
const char *loc;
if (!bus)
return "N/A";
/* PHB PE or root PE ? */
if (pci_is_root_bus(bus)) {
hose = pci_bus_to_host(bus);
loc = of_get_property(hose->dn,
"ibm,loc-code", NULL);
if (loc)
return loc;
loc = of_get_property(hose->dn,
"ibm,io-base-loc-code", NULL);
if (loc)
return loc;
pdev = pci_get_slot(bus, 0x0);
} else {
pdev = bus->self;
}
if (!pdev) {
loc = "N/A";
goto out;
}
dn = pci_device_to_OF_node(pdev);
if (!dn) {
loc = "N/A";
goto out;
}
loc = of_get_property(dn, "ibm,loc-code", NULL);
if (!loc)
loc = of_get_property(dn, "ibm,slot-location-code", NULL);
if (!loc)
loc = "N/A";
out:
if (pci_is_root_bus(bus) && pdev)
pci_dev_put(pdev);
return loc;
}
/**
* eeh_pe_bus_get - Retrieve PCI bus according to the given PE
* @pe: EEH PE
......
......@@ -428,12 +428,6 @@ BEGIN_FTR_SECTION
std r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
mfspr r25,SPRN_DSCR
std r25,THREAD_DSCR(r3)
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif
and. r0,r0,r22
beq+ 1f
andc r22,r22,r0
......
......@@ -1467,22 +1467,6 @@ a2_tlbinit_after_linear_map:
.globl a2_tlbinit_after_iprot_flush
a2_tlbinit_after_iprot_flush:
#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
/* Now establish early debug mappings if applicable */
/* Restore the MAS0 we used for linear mapping load */
mtspr SPRN_MAS0,r11
lis r3,(MAS1_VALID | MAS1_IPROT)@h
ori r3,r3,(BOOK3E_PAGESZ_4K << MAS1_TSIZE_SHIFT)
mtspr SPRN_MAS1,r3
LOAD_REG_IMMEDIATE(r3, WSP_UART_VIRT | MAS2_I | MAS2_G)
mtspr SPRN_MAS2,r3
LOAD_REG_IMMEDIATE(r3, WSP_UART_PHYS | MAS3_SR | MAS3_SW)
mtspr SPRN_MAS7_MAS3,r3
/* re-use the MAS8 value from the linear mapping */
tlbwe
#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */
PPC_TLBILX(0,0,R0)
sync
isync
......
......@@ -439,9 +439,9 @@ BEGIN_FTR_SECTION
* R9 = CR
* Original R9 to R13 is saved on PACA_EXMC
*
* Switch to mc_emergency stack and handle re-entrancy (though we
* currently don't test for overflow). Save MCE registers srr1,
* srr0, dar and dsisr and then set ME=1
* Switch to mc_emergency stack and handle re-entrancy (we limit
* the nested MCE up to level 4 to avoid stack overflow).
* Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
*
* We use paca->in_mce to check whether this is the first entry or
* nested machine check. We increment paca->in_mce to track nested
......@@ -464,6 +464,9 @@ BEGIN_FTR_SECTION
0: subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
addi r10,r10,1 /* increment paca->in_mce */
sth r10,PACA_IN_MCE(r13)
/* Limit nested MCE to level 4 to avoid stack overflow */
cmpwi r10,4
bgt 2f /* Check if we hit limit of 4 */
std r11,GPR1(r1) /* Save r1 on the stack. */
std r11,0(r1) /* make stack chain pointer */
mfspr r11,SPRN_SRR0 /* Save SRR0 */
......@@ -482,10 +485,23 @@ BEGIN_FTR_SECTION
ori r11,r11,MSR_RI /* turn on RI bit */
ld r12,PACAKBASE(r13) /* get high part of &label */
LOAD_HANDLER(r12, machine_check_handle_early)
mtspr SPRN_SRR0,r12
1: mtspr SPRN_SRR0,r12
mtspr SPRN_SRR1,r11
rfid
b . /* prevent speculative execution */
2:
/* Stack overflow. Stay on emergency stack and panic.
* Keep the ME bit off while panicking, so that if we hit
* another machine check we checkstop.
*/
addi r1,r1,INT_FRAME_SIZE /* go back to previous stack frame */
ld r11,PACAKMSR(r13)
ld r12,PACAKBASE(r13)
LOAD_HANDLER(r12, unrecover_mce)
li r10,MSR_ME
andc r11,r11,r10 /* Turn off MSR_ME */
b 1b
b . /* prevent speculative execution */
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
machine_check_pSeries:
......@@ -1389,6 +1405,7 @@ machine_check_handle_early:
bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_early
std r3,RESULT(r1) /* Save result */
ld r12,_MSR(r1)
#ifdef CONFIG_PPC_P7_NAP
/*
......@@ -1443,10 +1460,32 @@ machine_check_handle_early:
*/
andi. r11,r12,MSR_RI
bne 2f
1: addi r3,r1,STACK_FRAME_OVERHEAD
bl unrecoverable_exception
b 1b
1: mfspr r11,SPRN_SRR0
ld r10,PACAKBASE(r13)
LOAD_HANDLER(r10,unrecover_mce)
mtspr SPRN_SRR0,r10
ld r10,PACAKMSR(r13)
/*
* We are going down. But there is a chance that we get hit by
* another MCE on the panic path and end up in an unstable state
* with no way out. Hence, turn the ME bit off while going down, so
* that if another MCE hits during the panic path, the system will
* checkstop and the hypervisor will be restarted cleanly by the SP.
*/
li r3,MSR_ME
andc r10,r10,r3 /* Turn off MSR_ME */
mtspr SPRN_SRR1,r10
rfid
b .
2:
/*
* Check if we have successfully handled/recovered from error, if not
* then stay on emergency stack and panic.
*/
ld r3,RESULT(r1) /* Load result */
cmpdi r3,0 /* see if we handled MCE successfully */
beq 1b /* if !handled then panic */
/*
* Return from MC interrupt.
* Queue up the MCE event so that we can log it later, while
......@@ -1460,6 +1499,17 @@ machine_check_handle_early:
MACHINE_CHECK_HANDLER_WINDUP
b machine_check_pSeries
unrecover_mce:
/* Invoke machine_check_exception to print MCE event and panic. */
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_exception
/*
* We will not reach here. Even if we did, there is no way out. Call
* unrecoverable_exception and die.
*/
1: addi r3,r1,STACK_FRAME_OVERHEAD
bl unrecoverable_exception
b 1b
/*
* r13 points to the PACA, r9 contains the saved CR,
* r12 contain the saved SRR1, SRR0 is still ready for return
......
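
The "limit nested MCE to level 4" logic above is easier to see in C. Each nested machine check claims another INT_FRAME_SIZE frame from the same mc_emergency stack, so the depth counter in the PACA is checked right after it is incremented; past the limit, the handler stays on the emergency stack and panics with MSR_ME off so that one more machine check causes a checkstop instead of recursing. A runnable sketch of the guard (the kernel does this in assembly on paca->in_mce):

#include <stdio.h>
#include <stdlib.h>

#define MAX_MCE_DEPTH	4

static int in_mce;			/* paca->in_mce in the kernel */

static void handle_machine_check(void)
{
	/* decode and recover, possibly taking a nested MCE here */
}

static void machine_check_entry(void)
{
	if (++in_mce > MAX_MCE_DEPTH) {
		/* Stay on the emergency stack and die; the real code also
		 * clears MSR_ME first so a further machine check causes a
		 * checkstop instead of recursing. */
		fprintf(stderr, "machine check stack overflow\n");
		exit(1);
	}
	handle_machine_check();
	in_mce--;
}
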
......@@ -930,25 +930,6 @@ initial_mmu:
tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */
tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */
#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE)
/* Load a TLB entry for the UART, so that ppc4xx_progress() can use
* the UARTs nice and early. We use a 4k real==virtual mapping. */
lis r3,SERIAL_DEBUG_IO_BASE@h
ori r3,r3,SERIAL_DEBUG_IO_BASE@l
mr r4,r3
clrrwi r4,r4,12
ori r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G)
clrrwi r3,r3,12
ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
li r0,0 /* TLB slot 0 */
tlbwe r4,r0,TLB_DATA
tlbwe r3,r0,TLB_TAG
#endif /* CONFIG_SERIAL_DEBUG_TEXT && SERIAL_DEBUG_IO_BASE */
isync
/* Establish the exception vector base
......
......@@ -755,15 +755,15 @@ struct task_struct *__switch_to(struct task_struct *prev,
WARN_ON(!irqs_disabled());
/* Back up the TAR across context switches.
/* Back up the TAR and DSCR across context switches.
* Note that the TAR is not available for use in the kernel. (To
* provide this, the TAR should be backed up/restored on exception
* entry/exit instead, and be in pt_regs. FIXME, this should be in
* pt_regs anyway (for debug).)
* Save the TAR here before we do treclaim/trecheckpoint as these
* will change the TAR.
* Save the TAR and DSCR here before we do treclaim/trecheckpoint as
* these will change them.
*/
save_tar(&prev->thread);
save_early_sprs(&prev->thread);
__switch_to_tm(prev);
......
......@@ -471,7 +471,7 @@ void __init smp_setup_cpu_maps(void)
for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
DBG(" thread %d -> cpu %d (hard id %d)\n",
j, cpu, be32_to_cpu(intserv[j]));
set_cpu_present(cpu, true);
set_cpu_present(cpu, of_device_is_available(dn));
set_hard_smp_processor_id(cpu, be32_to_cpu(intserv[j]));
set_cpu_possible(cpu, true);
cpu++;
......
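
The smp_setup_cpu_maps() change above marks a CPU "present" only if its device-tree node is available, i.e. its status property allows it. A sketch of the status check that of_device_is_available() performs (simplified; the real helper reads the property from the node itself):

#include <string.h>

/* A node is available when it has no "status" property at all, or
 * when the property reads "okay" or "ok". */
static int dt_status_available(const char *status)
{
	if (!status)
		return 1;
	return strcmp(status, "okay") == 0 || strcmp(status, "ok") == 0;
}
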
......@@ -551,7 +551,7 @@ void timer_interrupt(struct pt_regs * regs)
may_hard_irq_enable();
#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
if (atomic_read(&ppc_n_lost_interrupts) != 0)
do_IRQ(regs);
#endif
......
......@@ -295,6 +295,8 @@ long machine_check_early(struct pt_regs *regs)
{
long handled = 0;
__get_cpu_var(irq_stat).mce_exceptions++;
if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
handled = cur_cpu_spec->machine_check_early(regs);
return handled;
......
......@@ -62,8 +62,6 @@ void __init udbg_early_init(void)
udbg_init_cpm();
#elif defined(CONFIG_PPC_EARLY_DEBUG_USBGECKO)
udbg_init_usbgecko();
#elif defined(CONFIG_PPC_EARLY_DEBUG_WSP)
udbg_init_wsp();
#elif defined(CONFIG_PPC_EARLY_DEBUG_MEMCONS)
/* In memory console */
udbg_init_memcons();
......
......@@ -296,14 +296,3 @@ void __init udbg_init_40x_realmode(void)
}
#endif /* CONFIG_PPC_EARLY_DEBUG_40x */
#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
void __init udbg_init_wsp(void)
{
udbg_uart_init_mmio((void *)WSP_UART_VIRT, 1);
udbg_uart_setup(57600, 50000000);
}
#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */
......@@ -113,10 +113,8 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
* We assume that if the condition is recovered then linux host
* will have generated an error log event that we will pick
* up and log later.
* Don't release mce event now. In case if condition is not
* recovered we do guest exit and go back to linux host machine
* check handler. Hence we need make sure that current mce event
* is available for linux host to consume.
* Don't release mce event now. We will queue up the event so that
* we can log the MCE event info on host console.
*/
if (!get_mce_event(&mce_evt, MCE_EVENT_DONTRELEASE))
goto out;
......@@ -128,11 +126,12 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
out:
/*
* If we have handled the error, then release the mce event because
* we will be delivering machine check to guest.
* We are now going to enter the guest, either through a machine
* check interrupt (for unhandled errors) or by continuing from
* the current HSRR0 (for handled errors). Hence queue up the
* event so that we can log it from the host console later.
*/
if (handled)
release_mce_event();
machine_check_queue_event();
return handled;
}
......
......@@ -2257,15 +2257,28 @@ machine_check_realmode:
mr r3, r9 /* get vcpu pointer */
bl kvmppc_realmode_machine_check
nop
cmpdi r3, 0 /* continue exiting from guest? */
cmpdi r3, 0 /* Did we handle MCE ? */
ld r9, HSTATE_KVM_VCPU(r13)
li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
beq mc_cont
/*
* Deliver unhandled/fatal (e.g. UE) MCE errors to the guest through
* a machine check interrupt (set HSRR0 to 0x200). For handled
* (non-fatal) errors, just go back to guest execution from the
* current HSRR0 instead of exiting the guest. This new approach
* injects a machine check into the guest for fatal errors, causing
* the guest to crash.
*
* The old code used to return to the host for unhandled errors,
* which caused the guest to hang with soft lockups inside the
* guest and made it difficult to recover the guest instance.
*/
ld r10, VCPU_PC(r9)
ld r11, VCPU_MSR(r9)
bne 2f /* Continue guest execution. */
/* If not, deliver a machine check. SRR0/1 are already set */
li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
ld r11, VCPU_MSR(r9)
bl kvmppc_msr_interrupt
b fast_interrupt_c_return
2: b fast_interrupt_c_return
/*
* Check the reason we woke from nap, and take appropriate action.
......
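
Reduced to C, the new delivery policy described in the comment above looks like this (the helper names are hypothetical stand-ins for the assembly paths, not kernel symbols):

struct kvm_vcpu;
extern long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
extern void resume_guest(struct kvm_vcpu *vcpu);	/* hypothetical */
extern void inject_guest_mce(struct kvm_vcpu *vcpu);	/* hypothetical: vector 0x200 */

static void realmode_mce_exit(struct kvm_vcpu *vcpu)
{
	if (kvmppc_realmode_machine_check(vcpu))
		resume_guest(vcpu);	/* handled: continue from current HSRR0 */
	else
		inject_guest_mce(vcpu);	/* unhandled/fatal: guest takes the 0x200 */
}
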
......@@ -1470,7 +1470,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
regs->gpr[rd] = byterev_4(val);
goto ldst_done;
#ifdef CONFIG_PPC_CPU
#ifdef CONFIG_PPC_FPU
case 535: /* lfsx */
case 567: /* lfsux */
if (!(regs->msr & MSR_FP))
......
......@@ -19,7 +19,6 @@ source "arch/powerpc/platforms/embedded6xx/Kconfig"
source "arch/powerpc/platforms/44x/Kconfig"
source "arch/powerpc/platforms/40x/Kconfig"
source "arch/powerpc/platforms/amigaone/Kconfig"
source "arch/powerpc/platforms/wsp/Kconfig"
config KVM_GUEST
bool "KVM Guest support"
......
......@@ -148,10 +148,6 @@ config POWER4
depends on PPC64 && PPC_BOOK3S
def_bool y
config PPC_A2
bool
depends on PPC_BOOK3E_64
config TUNE_CELL
bool "Optimize for Cell Broadband Engine"
depends on PPC64 && PPC_BOOK3S
......@@ -280,7 +276,7 @@ config VSX
config PPC_ICSWX
bool "Support for PowerPC icswx coprocessor instruction"
depends on POWER4 || PPC_A2
depends on POWER4
default n
---help---
......
......@@ -22,4 +22,3 @@ obj-$(CONFIG_PPC_CELL) += cell/
obj-$(CONFIG_PPC_PS3) += ps3/
obj-$(CONFIG_EMBEDDED6xx) += embedded6xx/
obj-$(CONFIG_AMIGAONE) += amigaone/
obj-$(CONFIG_PPC_WSP) += wsp/
......@@ -35,7 +35,6 @@
#define SPUFS_PS_MAP_SIZE 0x20000
#define SPUFS_MFC_MAP_SIZE 0x1000
#define SPUFS_CNTL_MAP_SIZE 0x1000
#define SPUFS_SIGNAL_MAP_SIZE PAGE_SIZE
#define SPUFS_MSS_MAP_SIZE 0x1000
......
......@@ -17,6 +17,7 @@ config PPC_POWERNV
select CPU_FREQ_GOV_USERSPACE
select CPU_FREQ_GOV_ONDEMAND
select CPU_FREQ_GOV_CONSERVATIVE
select PPC_DOORBELL
default y
config PPC_POWERNV_RTAS
......
obj-y += setup.o opal-takeover.o opal-wrappers.o opal.o opal-async.o
obj-y += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o
obj-y += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o
obj-y += opal-msglog.o subcore.o subcore-asm.o
obj-y += opal-msglog.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o
obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o
obj-$(CONFIG_EEH) += eeh-ioda.o eeh-powernv.o
obj-$(CONFIG_PPC_SCOM) += opal-xscom.o
......
......@@ -267,7 +267,7 @@ static int ioda_eeh_get_state(struct eeh_pe *pe)
{
s64 ret = 0;
u8 fstate;
u16 pcierr;
__be16 pcierr;
u32 pe_no;
int result;
struct pci_controller *hose = pe->phb;
......@@ -316,7 +316,7 @@ static int ioda_eeh_get_state(struct eeh_pe *pe)
result = 0;
result &= ~EEH_STATE_RESET_ACTIVE;
if (pcierr != OPAL_EEH_PHB_ERROR) {
if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) {
result |= EEH_STATE_MMIO_ACTIVE;
result |= EEH_STATE_DMA_ACTIVE;
result |= EEH_STATE_MMIO_ENABLED;
......@@ -705,18 +705,19 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
{
struct pci_controller *hose;
struct pnv_phb *phb;
struct eeh_pe *phb_pe;
u64 frozen_pe_no;
u16 err_type, severity;
struct eeh_pe *phb_pe, *parent_pe;
__be64 frozen_pe_no;
__be16 err_type, severity;
int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
long rc;
int ret = EEH_NEXT_ERR_NONE;
int state, ret = EEH_NEXT_ERR_NONE;
/*
* While running here, it's safe to purge the event queue.
* And we should keep the cached OPAL notifier event synchronized
* between the kernel and firmware.
*/
eeh_remove_event(NULL);
eeh_remove_event(NULL, false);
opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
list_for_each_entry(hose, &hose_list, list_node) {
......@@ -742,8 +743,8 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
}
/* If the PHB doesn't have error, stop processing */
if (err_type == OPAL_EEH_NO_ERROR ||
severity == OPAL_EEH_SEV_NO_ERROR) {
if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR ||
be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) {
pr_devel("%s: No error found on PHB#%x\n",
__func__, hose->global_number);
continue;
......@@ -755,14 +756,14 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
* specific PHB.
*/
pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
__func__, err_type, severity,
frozen_pe_no, hose->global_number);
switch (err_type) {
__func__, be16_to_cpu(err_type), be16_to_cpu(severity),
be64_to_cpu(frozen_pe_no), hose->global_number);
switch (be16_to_cpu(err_type)) {
case OPAL_EEH_IOC_ERROR:
if (severity == OPAL_EEH_SEV_IOC_DEAD) {
if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) {
pr_err("EEH: dead IOC detected\n");
ret = EEH_NEXT_ERR_DEAD_IOC;
} else if (severity == OPAL_EEH_SEV_INF) {
} else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
pr_info("EEH: IOC informative error "
"detected\n");
ioda_eeh_hub_diag(hose);
......@@ -771,20 +772,26 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
break;
case OPAL_EEH_PHB_ERROR:
if (severity == OPAL_EEH_SEV_PHB_DEAD) {
if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) {
*pe = phb_pe;
pr_err("EEH: dead PHB#%x detected\n",
hose->global_number);
pr_err("EEH: dead PHB#%x detected, "
"location: %s\n",
hose->global_number,
eeh_pe_loc_get(phb_pe));
ret = EEH_NEXT_ERR_DEAD_PHB;
} else if (severity == OPAL_EEH_SEV_PHB_FENCED) {
} else if (be16_to_cpu(severity) ==
OPAL_EEH_SEV_PHB_FENCED) {
*pe = phb_pe;
pr_err("EEH: fenced PHB#%x detected\n",
hose->global_number);
pr_err("EEH: Fenced PHB#%x detected, "
"location: %s\n",
hose->global_number,
eeh_pe_loc_get(phb_pe));
ret = EEH_NEXT_ERR_FENCED_PHB;
} else if (severity == OPAL_EEH_SEV_INF) {
} else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
pr_info("EEH: PHB#%x informative error "
"detected\n",
hose->global_number);
"detected, location: %s\n",
hose->global_number,
eeh_pe_loc_get(phb_pe));
ioda_eeh_phb_diag(hose);
ret = EEH_NEXT_ERR_NONE;
}
......@@ -792,34 +799,33 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
break;
case OPAL_EEH_PE_ERROR:
/*
* If we can't find the corresponding PE, the
* PEEV / PEST would be messy. So we force a
* fenced PHB so that it can be recovered.
*
* If the PE has been marked as isolated, that
* should have been removed permanently or in
* progress with recovery. We needn't report
* it again.
* If we can't find the corresponding PE, we
* just try to unfreeze.
*/
if (ioda_eeh_get_pe(hose, frozen_pe_no, pe)) {
*pe = phb_pe;
pr_err("EEH: Escalated fenced PHB#%x "
"detected for PE#%llx\n",
hose->global_number,
frozen_pe_no);
ret = EEH_NEXT_ERR_FENCED_PHB;
if (ioda_eeh_get_pe(hose,
be64_to_cpu(frozen_pe_no), pe)) {
/* Try best to clear it */
pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
hose->global_number, frozen_pe_no);
pr_info("EEH: PHB location: %s\n",
eeh_pe_loc_get(phb_pe));
opal_pci_eeh_freeze_clear(phb->opal_id, frozen_pe_no,
OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
ret = EEH_NEXT_ERR_NONE;
} else if ((*pe)->state & EEH_PE_ISOLATED) {
ret = EEH_NEXT_ERR_NONE;
} else {
pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
(*pe)->addr, (*pe)->phb->global_number);
pr_err("EEH: PE location: %s, PHB location: %s\n",
eeh_pe_loc_get(*pe), eeh_pe_loc_get(phb_pe));
ret = EEH_NEXT_ERR_FROZEN_PE;
}
break;
default:
pr_warn("%s: Unexpected error type %d\n",
__func__, err_type);
__func__, be16_to_cpu(err_type));
}
/*
......@@ -836,6 +842,31 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
ioda_eeh_phb_diag(hose);
}
/*
* The parent PE may well be frozen too, and if so it
* has to be handled first.
*/
if (ret == EEH_NEXT_ERR_FROZEN_PE) {
parent_pe = (*pe)->parent;
while (parent_pe) {
/* Hit the ceiling ? */
if (parent_pe->type & EEH_PE_PHB)
break;
/* Frozen parent PE ? */
state = ioda_eeh_get_state(parent_pe);
if (state > 0 &&
(state & active_flags) != active_flags)
*pe = parent_pe;
/* Next parent level */
parent_pe = parent_pe->parent;
}
/* We possibly migrate to another PE */
eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
}
/*
* If we have no errors on the specific PHB or only
* informative error there, we continue poking it.
......
......@@ -37,7 +37,8 @@ static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj,
{
struct memcons *mc = bin_attr->private;
const char *conbuf;
size_t ret, first_read = 0;
ssize_t ret;
size_t first_read = 0;
uint32_t out_pos, avail;
if (!mc)
......@@ -69,6 +70,9 @@ static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj,
to += first_read;
count -= first_read;
pos -= avail;
if (count <= 0)
goto out;
}
/* Sanity check. The firmware should not do this to us. */
......
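
The opal-msglog fix above addresses two things: `ret` was an unsigned size_t, so error returns could never be negative, and a read fully satisfied from the wrapped (older) part of the buffer fell through and copied again. The two-part read over the firmware ring buffer, in a runnable userspace sketch (names are illustrative): once the buffer has wrapped, the oldest data starts at the firmware's next write position `next`, so a read may need one copy from the tail and one from the head, and must stop between them when `count` is exhausted.

#include <stddef.h>
#include <string.h>

static size_t ring_read(const char *ring, size_t size, size_t next,
			int wrapped, char *to, size_t count, size_t pos)
{
	size_t copied = 0;

	if (wrapped) {
		size_t avail = size - next;	/* bytes from `next` to end */

		if (pos < avail) {
			size_t n = avail - pos;
			if (n > count)
				n = count;
			memcpy(to, ring + next + pos, n);
			to += n;
			count -= n;
			copied += n;
			pos = 0;		/* continue at ring start */
		} else {
			pos -= avail;
		}
		if (count == 0)			/* the kernel fix: stop here */
			return copied;
	}

	if (pos < next) {			/* newest region: [0, next) */
		size_t n = next - pos;
		if (n > count)
			n = count;
		memcpy(to, ring + pos, n);
		copied += n;
	}
	return copied;
}
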
......@@ -260,10 +260,10 @@ void __init opal_sys_param_init(void)
attr[i].kobj_attr.attr.mode = S_IRUGO;
break;
case OPAL_SYSPARAM_WRITE:
attr[i].kobj_attr.attr.mode = S_IWUGO;
attr[i].kobj_attr.attr.mode = S_IWUSR;
break;
case OPAL_SYSPARAM_RW:
attr[i].kobj_attr.attr.mode = S_IRUGO | S_IWUGO;
attr[i].kobj_attr.attr.mode = S_IRUGO | S_IWUSR;
break;
default:
break;
......
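
The S_IWUGO to S_IWUSR change above narrows writable OPAL sysparam attributes from world-writable to owner-only (root). The octal values make the fix obvious; S_IRUGO/S_IWUGO are kernel-side shorthands, so they are spelled out here for a runnable demo:

#include <stdio.h>
#include <sys/stat.h>

#define S_IRUGO	(S_IRUSR | S_IRGRP | S_IROTH)	/* 0444 */
#define S_IWUGO	(S_IWUSR | S_IWGRP | S_IWOTH)	/* 0222 */

int main(void)
{
	printf("S_IWUGO         = %04o (anyone may write)\n", S_IWUGO);
	printf("S_IWUSR         = %04o (owner, i.e. root, only)\n", S_IWUSR);
	printf("S_IRUGO|S_IWUSR = %04o (world-readable, root-writable)\n",
	       S_IRUGO | S_IWUSR);
	return 0;
}
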
......@@ -206,72 +206,91 @@ static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
data = (struct OpalIoPhb3ErrorData*)common;
pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n",
hose->global_number, common->version);
hose->global_number, be32_to_cpu(common->version));
if (data->brdgCtl)
pr_info("brdgCtl: %08x\n",
data->brdgCtl);
be32_to_cpu(data->brdgCtl));
if (data->portStatusReg || data->rootCmplxStatus ||
data->busAgentStatus)
pr_info("UtlSts: %08x %08x %08x\n",
data->portStatusReg, data->rootCmplxStatus,
data->busAgentStatus);
be32_to_cpu(data->portStatusReg),
be32_to_cpu(data->rootCmplxStatus),
be32_to_cpu(data->busAgentStatus));
if (data->deviceStatus || data->slotStatus ||
data->linkStatus || data->devCmdStatus ||
data->devSecStatus)
pr_info("RootSts: %08x %08x %08x %08x %08x\n",
data->deviceStatus, data->slotStatus,
data->linkStatus, data->devCmdStatus,
data->devSecStatus);
be32_to_cpu(data->deviceStatus),
be32_to_cpu(data->slotStatus),
be32_to_cpu(data->linkStatus),
be32_to_cpu(data->devCmdStatus),
be32_to_cpu(data->devSecStatus));
if (data->rootErrorStatus || data->uncorrErrorStatus ||
data->corrErrorStatus)
pr_info("RootErrSts: %08x %08x %08x\n",
data->rootErrorStatus, data->uncorrErrorStatus,
data->corrErrorStatus);
be32_to_cpu(data->rootErrorStatus),
be32_to_cpu(data->uncorrErrorStatus),
be32_to_cpu(data->corrErrorStatus));
if (data->tlpHdr1 || data->tlpHdr2 ||
data->tlpHdr3 || data->tlpHdr4)
pr_info("RootErrLog: %08x %08x %08x %08x\n",
data->tlpHdr1, data->tlpHdr2,
data->tlpHdr3, data->tlpHdr4);
be32_to_cpu(data->tlpHdr1),
be32_to_cpu(data->tlpHdr2),
be32_to_cpu(data->tlpHdr3),
be32_to_cpu(data->tlpHdr4));
if (data->sourceId || data->errorClass ||
data->correlator)
pr_info("RootErrLog1: %08x %016llx %016llx\n",
data->sourceId, data->errorClass,
data->correlator);
be32_to_cpu(data->sourceId),
be64_to_cpu(data->errorClass),
be64_to_cpu(data->correlator));
if (data->nFir)
pr_info("nFir: %016llx %016llx %016llx\n",
data->nFir, data->nFirMask,
data->nFirWOF);
be64_to_cpu(data->nFir),
be64_to_cpu(data->nFirMask),
be64_to_cpu(data->nFirWOF));
if (data->phbPlssr || data->phbCsr)
pr_info("PhbSts: %016llx %016llx\n",
data->phbPlssr, data->phbCsr);
be64_to_cpu(data->phbPlssr),
be64_to_cpu(data->phbCsr));
if (data->lemFir)
pr_info("Lem: %016llx %016llx %016llx\n",
data->lemFir, data->lemErrorMask,
data->lemWOF);
be64_to_cpu(data->lemFir),
be64_to_cpu(data->lemErrorMask),
be64_to_cpu(data->lemWOF));
if (data->phbErrorStatus)
pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
data->phbErrorStatus, data->phbFirstErrorStatus,
data->phbErrorLog0, data->phbErrorLog1);
be64_to_cpu(data->phbErrorStatus),
be64_to_cpu(data->phbFirstErrorStatus),
be64_to_cpu(data->phbErrorLog0),
be64_to_cpu(data->phbErrorLog1));
if (data->mmioErrorStatus)
pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
data->mmioErrorStatus, data->mmioFirstErrorStatus,
data->mmioErrorLog0, data->mmioErrorLog1);
be64_to_cpu(data->mmioErrorStatus),
be64_to_cpu(data->mmioFirstErrorStatus),
be64_to_cpu(data->mmioErrorLog0),
be64_to_cpu(data->mmioErrorLog1));
if (data->dma0ErrorStatus)
pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
data->dma0ErrorStatus, data->dma0FirstErrorStatus,
data->dma0ErrorLog0, data->dma0ErrorLog1);
be64_to_cpu(data->dma0ErrorStatus),
be64_to_cpu(data->dma0FirstErrorStatus),
be64_to_cpu(data->dma0ErrorLog0),
be64_to_cpu(data->dma0ErrorLog1));
if (data->dma1ErrorStatus)
pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
data->dma1ErrorStatus, data->dma1FirstErrorStatus,
data->dma1ErrorLog0, data->dma1ErrorLog1);
be64_to_cpu(data->dma1ErrorStatus),
be64_to_cpu(data->dma1FirstErrorStatus),
be64_to_cpu(data->dma1ErrorLog0),
be64_to_cpu(data->dma1ErrorLog1));
for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
if ((data->pestA[i] >> 63) == 0 &&
(data->pestB[i] >> 63) == 0)
if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
(be64_to_cpu(data->pestB[i]) >> 63) == 0)
continue;
pr_info("PE[%3d] A/B: %016llx %016llx\n",
i, data->pestA[i], data->pestB[i]);
i, be64_to_cpu(data->pestA[i]),
be64_to_cpu(data->pestB[i]));
}
}
......@@ -284,7 +303,7 @@ void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
return;
common = (struct OpalIoPhbErrorCommon *)log_buff;
switch (common->ioType) {
switch (be32_to_cpu(common->ioType)) {
case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
pnv_pci_dump_p7ioc_diag_data(hose, common);
break;
......@@ -293,7 +312,7 @@ void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
break;
default:
pr_warn("%s: Unrecognized ioType %d\n",
__func__, common->ioType);
__func__, be32_to_cpu(common->ioType));
}
}
......
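
One subtlety in the PHB3 diag dump above: the PEST loop now skips entries only after byte-swapping, since on a little-endian kernel the raw most-significant bit of the register would land in the wrong byte. Bit 63 of the host-order value corresponds to IBM bit 0 of pestA/pestB, the per-PE error/frozen indicator (the exact field semantics per the PHB3 spec are an assumption here). The check, isolated:

#include <stdint.h>

/* Skip a PEST entry only when the top bit is clear in both words
 * after conversion to host byte order. */
static int pest_entry_interesting(uint64_t pest_a, uint64_t pest_b)
{
	return ((pest_a >> 63) | (pest_b >> 63)) != 0;
}
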
......@@ -35,11 +35,14 @@
#include <asm/rtas.h>
#include <asm/opal.h>
#include <asm/kexec.h>
#include <asm/smp.h>
#include "powernv.h"
static void __init pnv_setup_arch(void)
{
set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
/* Initialize SMP */
pnv_smp_init();
......
......@@ -32,6 +32,7 @@
#include <asm/opal.h>
#include <asm/runlatch.h>
#include <asm/code-patching.h>
#include <asm/dbell.h>
#include "powernv.h"
......@@ -46,6 +47,11 @@ static void pnv_smp_setup_cpu(int cpu)
{
if (cpu != boot_cpuid)
xics_setup_cpu();
#ifdef CONFIG_PPC_DOORBELL
if (cpu_has_feature(CPU_FTR_DBELL))
doorbell_setup_this_cpu();
#endif
}
int pnv_smp_kick_cpu(int nr)
......
......@@ -21,6 +21,7 @@ config PPC_PSERIES
select HAVE_CONTEXT_TRACKING
select HOTPLUG_CPU if SMP
select ARCH_RANDOM
select PPC_DOORBELL
default y
config PPC_SPLPAR
......
config PPC_WSP
bool
select PPC_A2
select GENERIC_TBSYNC
select PPC_ICSWX
select PPC_SCOM
select PPC_XICS
select PPC_ICP_NATIVE
select PCI
select PPC_IO_WORKAROUNDS if PCI
select PPC_INDIRECT_PIO if PCI
default n
menu "WSP platform selection"
depends on PPC_BOOK3E_64
config PPC_PSR2
bool "PowerEN System Reference Platform 2"
select EPAPR_BOOT
select PPC_WSP
default y
config PPC_CHROMA
bool "PowerEN PCIe Chroma Card"
select EPAPR_BOOT
select PPC_WSP
select OF_DYNAMIC
default y
endmenu
ccflags-y += $(NO_MINIMAL_TOC)
obj-y += setup.o ics.o wsp.o
obj-$(CONFIG_PPC_PSR2) += psr2.o
obj-$(CONFIG_PPC_CHROMA) += chroma.o h8.o
obj-$(CONFIG_PPC_WSP) += opb_pic.o
obj-$(CONFIG_PPC_WSP) += scom_wsp.o
obj-$(CONFIG_SMP) += smp.o scom_smp.o
obj-$(CONFIG_PCI) += wsp_pci.o
obj-$(CONFIG_PCI_MSI) += msi.o
/*
* Copyright 2008-2011, IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <linux/time.h>
#include <linux/of_fdt.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include "ics.h"
#include "wsp.h"
void __init chroma_setup_arch(void)
{
wsp_setup_arch();
wsp_setup_h8();
}
static int __init chroma_probe(void)
{
unsigned long root = of_get_flat_dt_root();
if (!of_flat_dt_is_compatible(root, "ibm,wsp-chroma"))
return 0;
return 1;
}
define_machine(chroma_md) {
.name = "Chroma PCIe",
.probe = chroma_probe,
.setup_arch = chroma_setup_arch,
.restart = wsp_h8_restart,
.power_off = wsp_h8_power_off,
.halt = wsp_halt,
.calibrate_decr = generic_calibrate_decr,
.init_IRQ = wsp_setup_irq,
.progress = udbg_progress,
.power_save = book3e_idle,
};
machine_arch_initcall(chroma_md, wsp_probe_devices);
/*
* Copyright 2008-2011, IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include "wsp.h"
/*
* The UART connection to the H8 is over ttyS1 which is just a 16550.
* We assume that FW has it set up right and no one messes with it.
*/
static u8 __iomem *h8;
#define RBR 0 /* Receiver Buffer Register */
#define THR 0 /* Transmitter Holding Register */
#define LSR 5 /* Line Status Register */
#define LSR_DR 0x01 /* LSR value for Data-Ready */
#define LSR_THRE 0x20 /* LSR value for Transmitter-Holding-Register-Empty */
static void wsp_h8_putc(int c)
{
u8 lsr;
do {
lsr = readb(h8 + LSR);
} while ((lsr & LSR_THRE) != LSR_THRE);
writeb(c, h8 + THR);
}
static int wsp_h8_getc(void)
{
u8 lsr;
do {
lsr = readb(h8 + LSR);
} while ((lsr & LSR_DR) != LSR_DR);
return readb(h8 + RBR);
}
static void wsp_h8_puts(const char *s, int sz)
{
int i;
for (i = 0; i < sz; i++) {
wsp_h8_putc(s[i]);
/* no flow control so wait for echo */
wsp_h8_getc();
}
wsp_h8_putc('\r');
wsp_h8_putc('\n');
}
static void wsp_h8_terminal_cmd(const char *cmd, int sz)
{
hard_irq_disable();
wsp_h8_puts(cmd, sz);
/* should never return, but just in case */
for (;;)
continue;
}
void wsp_h8_restart(char *cmd)
{
static const char restart[] = "warm-reset";
(void)cmd;
wsp_h8_terminal_cmd(restart, sizeof(restart) - 1);
}
void wsp_h8_power_off(void)
{
static const char off[] = "power-off";
wsp_h8_terminal_cmd(off, sizeof(off) - 1);
}
static void __iomem *wsp_h8_getaddr(void)
{
struct device_node *aliases;
struct device_node *uart;
struct property *path;
void __iomem *va = NULL;
/*
* there is nothing in the devtree to tell us which is mapped
* to the H8, but we know it is the second serial port.
*/
aliases = of_find_node_by_path("/aliases");
if (aliases == NULL)
return NULL;
path = of_find_property(aliases, "serial1", NULL);
if (path == NULL)
goto out;
uart = of_find_node_by_path(path->value);
if (uart == NULL)
goto out;
va = of_iomap(uart, 0);
/* remove it so no one messes with it */
of_detach_node(uart);
of_node_put(uart);
out:
of_node_put(aliases);
return va;
}
void __init wsp_setup_h8(void)
{
h8 = wsp_h8_getaddr();
/* Devtree change? let's hard-map it anyway */
if (h8 == NULL) {
pr_warn("UART to H8 could not be found");
h8 = ioremap(0xffc0008000ULL, 0x100);
}
}
/*
* Copyright 2008-2011 IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/xics.h>
#include "wsp.h"
#include "ics.h"
/* WSP ICS */
struct wsp_ics {
struct ics ics;
struct device_node *dn;
void __iomem *regs;
spinlock_t lock;
unsigned long *bitmap;
u32 chip_id;
u32 lsi_base;
u32 lsi_count;
u64 hwirq_start;
u64 count;
#ifdef CONFIG_SMP
int *hwirq_cpu_map;
#endif
};
#define to_wsp_ics(ics) container_of(ics, struct wsp_ics, ics)
#define INT_SRC_LAYER_BUID_REG(base) ((base) + 0x00)
#define IODA_TBL_ADDR_REG(base) ((base) + 0x18)
#define IODA_TBL_DATA_REG(base) ((base) + 0x20)
#define XIVE_UPDATE_REG(base) ((base) + 0x28)
#define ICS_INT_CAPS_REG(base) ((base) + 0x30)
#define TBL_AUTO_INCREMENT ((1UL << 63) | (1UL << 15))
#define TBL_SELECT_XIST (1UL << 48)
#define TBL_SELECT_XIVT (1UL << 49)
#define IODA_IRQ(irq) ((irq) & (0x7FFULL)) /* HRM 5.1.3.4 */
#define XIST_REQUIRED 0x8
#define XIST_REJECTED 0x4
#define XIST_PRESENTED 0x2
#define XIST_PENDING 0x1
#define XIVE_SERVER_SHIFT 42
#define XIVE_SERVER_MASK 0xFFFFULL
#define XIVE_PRIORITY_MASK 0xFFULL
#define XIVE_PRIORITY_SHIFT 32
#define XIVE_WRITE_ENABLE (1ULL << 63)
/*
* The docs refer to a 6 bit field called ChipID, which consists of a
* 3 bit NodeID and a 3 bit ChipID. On WSP the ChipID is always zero
* so we ignore it, and every where we use "chip id" in this code we
* mean the NodeID.
*/
#define WSP_ICS_CHIP_SHIFT 17
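/*
 * Illustrative sketch (not from the original source): with the encoding
 * described above, the NodeID lands at bit 17 of a global hwirq, so
 * recovering it looks like:
 *
 *	u32 nodeid = (hwirq >> WSP_ICS_CHIP_SHIFT) & 0x7;
 *
 * Note that cache_hwirq_map() below masks with 0x3 instead of 0x7; on
 * WSP only two node id bits are apparently ever used.
 */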
static struct wsp_ics *ics_list;
static int num_ics;
/* ICS Source controller accessors */
static u64 wsp_ics_get_xive(struct wsp_ics *ics, unsigned int irq)
{
unsigned long flags;
u64 xive;
spin_lock_irqsave(&ics->lock, flags);
out_be64(IODA_TBL_ADDR_REG(ics->regs), TBL_SELECT_XIVT | IODA_IRQ(irq));
xive = in_be64(IODA_TBL_DATA_REG(ics->regs));
spin_unlock_irqrestore(&ics->lock, flags);
return xive;
}
static void wsp_ics_set_xive(struct wsp_ics *ics, unsigned int irq, u64 xive)
{
xive &= ~XIVE_ADDR_MASK;
xive |= (irq & XIVE_ADDR_MASK);
xive |= XIVE_WRITE_ENABLE;
out_be64(XIVE_UPDATE_REG(ics->regs), xive);
}
static u64 xive_set_server(u64 xive, unsigned int server)
{
u64 mask = ~(XIVE_SERVER_MASK << XIVE_SERVER_SHIFT);
xive &= mask;
xive |= (server & XIVE_SERVER_MASK) << XIVE_SERVER_SHIFT;
return xive;
}
static u64 xive_set_priority(u64 xive, unsigned int priority)
{
u64 mask = ~(XIVE_PRIORITY_MASK << XIVE_PRIORITY_SHIFT);
xive &= mask;
xive |= (priority & XIVE_PRIORITY_MASK) << XIVE_PRIORITY_SHIFT;
return xive;
}
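/*
 * Layout of the XIVE word edited by the two helpers above, as implied
 * by the masks and shifts (a sketch derived from the defines, not taken
 * from documentation):
 *
 *   bit  63    : write enable (only meaningful via XIVE_UPDATE_REG)
 *   bits 57-42 : server number
 *   bits 39-32 : priority
 *   bits 10-0  : irq number (XIVE_ADDR_MASK, filled in by
 *                wsp_ics_set_xive())
 */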
#ifdef CONFIG_SMP
/* Find logical CPUs within mask on a given chip and store result in ret */
void cpus_on_chip(int chip_id, cpumask_t *mask, cpumask_t *ret)
{
int cpu, chip;
struct device_node *cpu_dn, *dn;
const u32 *prop;
cpumask_clear(ret);
for_each_cpu(cpu, mask) {
cpu_dn = of_get_cpu_node(cpu, NULL);
if (!cpu_dn)
continue;
prop = of_get_property(cpu_dn, "at-node", NULL);
if (!prop) {
of_node_put(cpu_dn);
continue;
}
dn = of_find_node_by_phandle(*prop);
of_node_put(cpu_dn);
chip = wsp_get_chip_id(dn);
if (chip == chip_id)
cpumask_set_cpu(cpu, ret);
of_node_put(dn);
}
}
/* Store a suitable CPU to handle a hwirq in the ics->hwirq_cpu_map cache */
static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
const cpumask_t *affinity)
{
cpumask_var_t avail, newmask;
int ret = -ENOMEM, cpu, cpu_rover = 0, target;
int index = hwirq - ics->hwirq_start;
unsigned int nodeid;
BUG_ON(index < 0 || index >= ics->count);
if (!ics->hwirq_cpu_map)
return -ENOMEM;
if (!distribute_irqs) {
ics->hwirq_cpu_map[index] = xics_default_server;
return 0;
}
/* Allocate needed CPU masks */
if (!alloc_cpumask_var(&avail, GFP_KERNEL))
goto ret;
if (!alloc_cpumask_var(&newmask, GFP_KERNEL))
goto freeavail;
/* Find PBus attached to the source of this IRQ */
nodeid = (hwirq >> WSP_ICS_CHIP_SHIFT) & 0x3; /* 12:14 */
/* Find CPUs that could handle this IRQ */
if (affinity)
cpumask_and(avail, cpu_online_mask, affinity);
else
cpumask_copy(avail, cpu_online_mask);
/* Narrow selection down to logical CPUs on the same chip */
cpus_on_chip(nodeid, avail, newmask);
/* Ensure we haven't narrowed it down to 0 */
if (unlikely(cpumask_empty(newmask))) {
if (unlikely(cpumask_empty(avail))) {
ret = -1;
goto out;
}
cpumask_copy(newmask, avail);
}
/* Choose a CPU out of those we narrowed it down to in round robin */
target = hwirq % cpumask_weight(newmask);
for_each_cpu(cpu, newmask) {
if (cpu_rover++ >= target) {
ics->hwirq_cpu_map[index] = get_hard_smp_processor_id(cpu);
ret = 0;
goto out;
}
}
/* Shouldn't happen */
WARN_ON(1);
out:
free_cpumask_var(newmask);
freeavail:
free_cpumask_var(avail);
ret:
if (ret < 0) {
ics->hwirq_cpu_map[index] = cpumask_first(cpu_online_mask);
pr_warning("Error, falling hwirq 0x%x routing back to CPU %i\n",
hwirq, ics->hwirq_cpu_map[index]);
}
return ret;
}
static void alloc_irq_map(struct wsp_ics *ics)
{
int i;
ics->hwirq_cpu_map = kmalloc(sizeof(int) * ics->count, GFP_KERNEL);
if (!ics->hwirq_cpu_map) {
pr_warning("Allocate hwirq_cpu_map failed, "
"IRQ balancing disabled\n");
return;
}
for (i = 0; i < ics->count; i++)
ics->hwirq_cpu_map[i] = xics_default_server;
}
static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)
{
int index = hwirq - ics->hwirq_start;
BUG_ON(index < 0 || index >= ics->count);
if (!ics->hwirq_cpu_map)
return xics_default_server;
return ics->hwirq_cpu_map[index];
}
#else /* !CONFIG_SMP */
static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
const cpumask_t *affinity)
{
return 0;
}
static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)
{
return xics_default_server;
}
static void alloc_irq_map(struct wsp_ics *ics) { }
#endif
static void wsp_chip_unmask_irq(struct irq_data *d)
{
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
struct wsp_ics *ics;
int server;
u64 xive;
if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
return;
ics = d->chip_data;
if (WARN_ON(!ics))
return;
server = get_irq_server(ics, hw_irq);
xive = wsp_ics_get_xive(ics, hw_irq);
xive = xive_set_server(xive, server);
xive = xive_set_priority(xive, DEFAULT_PRIORITY);
wsp_ics_set_xive(ics, hw_irq, xive);
}
static unsigned int wsp_chip_startup(struct irq_data *d)
{
/* unmask it */
wsp_chip_unmask_irq(d);
return 0;
}
static void wsp_mask_real_irq(unsigned int hw_irq, struct wsp_ics *ics)
{
u64 xive;
if (hw_irq == XICS_IPI)
return;
if (WARN_ON(!ics))
return;
xive = wsp_ics_get_xive(ics, hw_irq);
xive = xive_set_server(xive, xics_default_server);
xive = xive_set_priority(xive, LOWEST_PRIORITY);
wsp_ics_set_xive(ics, hw_irq, xive);
}
static void wsp_chip_mask_irq(struct irq_data *d)
{
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
struct wsp_ics *ics = d->chip_data;
if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
return;
wsp_mask_real_irq(hw_irq, ics);
}
static int wsp_chip_set_affinity(struct irq_data *d,
const struct cpumask *cpumask, bool force)
{
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
struct wsp_ics *ics;
int ret;
u64 xive;
if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
return -1;
ics = d->chip_data;
if (WARN_ON(!ics))
return -1;
xive = wsp_ics_get_xive(ics, hw_irq);
/*
* For the moment we only implement delivery to all cpus or one cpu.
* Update the cached irq_server for the given irq.
*/
ret = cache_hwirq_map(ics, hw_irq, cpumask);
if (ret == -1) {
char cpulist[128];
cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
pr_warning("%s: No online cpus in the mask %s for irq %d\n",
__func__, cpulist, d->irq);
return -1;
} else if (ret == -ENOMEM) {
pr_warning("%s: Out of memory\n", __func__);
return -1;
}
xive = xive_set_server(xive, get_irq_server(ics, hw_irq));
wsp_ics_set_xive(ics, hw_irq, xive);
return IRQ_SET_MASK_OK;
}
static struct irq_chip wsp_irq_chip = {
.name = "WSP ICS",
.irq_startup = wsp_chip_startup,
.irq_mask = wsp_chip_mask_irq,
.irq_unmask = wsp_chip_unmask_irq,
.irq_set_affinity = wsp_chip_set_affinity
};
static int wsp_ics_host_match(struct ics *ics, struct device_node *dn)
{
/* All ICSs in the system implement a global irq number space,
* so match against them all. */
return of_device_is_compatible(dn, "ibm,ppc-xics");
}
static int wsp_ics_match_hwirq(struct wsp_ics *wsp_ics, unsigned int hwirq)
{
if (hwirq >= wsp_ics->hwirq_start &&
hwirq < wsp_ics->hwirq_start + wsp_ics->count)
return 1;
return 0;
}
static int wsp_ics_map(struct ics *ics, unsigned int virq)
{
struct wsp_ics *wsp_ics = to_wsp_ics(ics);
unsigned int hw_irq = virq_to_hw(virq);
unsigned long flags;
if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
return -ENOENT;
irq_set_chip_and_handler(virq, &wsp_irq_chip, handle_fasteoi_irq);
irq_set_chip_data(virq, wsp_ics);
spin_lock_irqsave(&wsp_ics->lock, flags);
bitmap_allocate_region(wsp_ics->bitmap, hw_irq - wsp_ics->hwirq_start, 0);
spin_unlock_irqrestore(&wsp_ics->lock, flags);
return 0;
}
static void wsp_ics_mask_unknown(struct ics *ics, unsigned long hw_irq)
{
struct wsp_ics *wsp_ics = to_wsp_ics(ics);
if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
return;
pr_err("%s: IRQ %lu (real) is invalid, disabling it.\n", __func__, hw_irq);
wsp_mask_real_irq(hw_irq, wsp_ics);
}
static long wsp_ics_get_server(struct ics *ics, unsigned long hw_irq)
{
struct wsp_ics *wsp_ics = to_wsp_ics(ics);
if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
return -ENOENT;
return get_irq_server(wsp_ics, hw_irq);
}
/* HW Number allocation API */
static struct wsp_ics *wsp_ics_find_dn_ics(struct device_node *dn)
{
struct device_node *iparent;
int i;
iparent = of_irq_find_parent(dn);
if (!iparent) {
pr_err("wsp_ics: Failed to find interrupt parent!\n");
return NULL;
}
for (i = 0; i < num_ics; i++) {
if (ics_list[i].dn == iparent)
break;
}
if (i >= num_ics) {
pr_err("wsp_ics: Unable to find parent bitmap!\n");
return NULL;
}
return &ics_list[i];
}
int wsp_ics_alloc_irq(struct device_node *dn, int num)
{
struct wsp_ics *ics;
int order, offset;
ics = wsp_ics_find_dn_ics(dn);
if (!ics)
return -ENODEV;
/* Fast, but overly strict if num isn't a power of two */
order = get_count_order(num);
spin_lock_irq(&ics->lock);
offset = bitmap_find_free_region(ics->bitmap, ics->count, order);
spin_unlock_irq(&ics->lock);
if (offset < 0)
return offset;
return offset + ics->hwirq_start;
}
void wsp_ics_free_irq(struct device_node *dn, unsigned int irq)
{
struct wsp_ics *ics;
ics = wsp_ics_find_dn_ics(dn);
if (WARN_ON(!ics))
return;
spin_lock_irq(&ics->lock);
bitmap_release_region(ics->bitmap, irq, 0);
spin_unlock_irq(&ics->lock);
}
/* Initialisation */
static int __init wsp_ics_bitmap_setup(struct wsp_ics *ics,
struct device_node *dn)
{
int len, i, j, size;
u32 start, count;
const u32 *p;
size = BITS_TO_LONGS(ics->count) * sizeof(long);
ics->bitmap = kzalloc(size, GFP_KERNEL);
if (!ics->bitmap) {
pr_err("wsp_ics: ENOMEM allocating IRQ bitmap!\n");
return -ENOMEM;
}
spin_lock_init(&ics->lock);
p = of_get_property(dn, "available-ranges", &len);
if (!p || !len) {
/* FIXME this should be a WARN() once mambo is updated */
pr_err("wsp_ics: No available-ranges defined for %s\n",
dn->full_name);
return 0;
}
if (len % (2 * sizeof(u32)) != 0) {
/* FIXME this should be a WARN() once mambo is updated */
pr_err("wsp_ics: Invalid available-ranges for %s\n",
dn->full_name);
return 0;
}
bitmap_fill(ics->bitmap, ics->count);
for (i = 0; i < len / sizeof(u32); i += 2) {
start = of_read_number(p + i, 1);
count = of_read_number(p + i + 1, 1);
pr_devel("%s: start: %d count: %d\n", __func__, start, count);
if ((start + count) > (ics->hwirq_start + ics->count) ||
start < ics->hwirq_start) {
pr_err("wsp_ics: Invalid range! -> %d to %d\n",
start, start + count);
break;
}
for (j = 0; j < count; j++)
bitmap_release_region(ics->bitmap,
(start + j) - ics->hwirq_start, 0);
}
/* Ensure LSIs are not available for allocation */
bitmap_allocate_region(ics->bitmap, ics->lsi_base,
get_count_order(ics->lsi_count));
return 0;
}
static int __init wsp_ics_setup(struct wsp_ics *ics, struct device_node *dn)
{
u32 lsi_buid, msi_buid, msi_base, msi_count;
void __iomem *regs;
const u32 *p;
int rc, len, i;
u64 caps, buid;
p = of_get_property(dn, "interrupt-ranges", &len);
if (!p || len < (2 * sizeof(u32))) {
pr_err("wsp_ics: No/bad interrupt-ranges found on %s\n",
dn->full_name);
return -ENOENT;
}
if (len > (2 * sizeof(u32))) {
pr_err("wsp_ics: Multiple ics ranges not supported.\n");
return -EINVAL;
}
regs = of_iomap(dn, 0);
if (!regs) {
pr_err("wsp_ics: of_iomap(%s) failed\n", dn->full_name);
return -ENXIO;
}
ics->hwirq_start = of_read_number(p, 1);
ics->count = of_read_number(p + 1, 1);
ics->regs = regs;
ics->chip_id = wsp_get_chip_id(dn);
if (WARN_ON(ics->chip_id < 0))
ics->chip_id = 0;
/* Get some information about the critter */
caps = in_be64(ICS_INT_CAPS_REG(ics->regs));
buid = in_be64(INT_SRC_LAYER_BUID_REG(ics->regs));
ics->lsi_count = caps >> 56;
msi_count = (caps >> 44) & 0x7ff;
/* Note: LSI BUID is 9 bits, but really only 3 are BUID and the
* rest is mixed in the interrupt number. We store the whole
* thing though
*/
lsi_buid = (buid >> 48) & 0x1ff;
ics->lsi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | lsi_buid << 5;
msi_buid = (buid >> 37) & 0x7;
msi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | msi_buid << 11;
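/*
 * Worked example (numbers assumed for illustration): with chip_id 1,
 * lsi_buid 0x10 and msi_buid 0x2 the above yields
 *   lsi_base = (1 << 17) | (0x10 << 5)  = 0x020200
 *   msi_base = (1 << 17) | (0x2  << 11) = 0x021000
 * i.e. both ranges land in chip 1's slice of the global irq space.
 */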
pr_info("wsp_ics: Found %s\n", dn->full_name);
pr_info("wsp_ics: irq range : 0x%06llx..0x%06llx\n",
ics->hwirq_start, ics->hwirq_start + ics->count - 1);
pr_info("wsp_ics: %4d LSIs : 0x%06x..0x%06x\n",
ics->lsi_count, ics->lsi_base,
ics->lsi_base + ics->lsi_count - 1);
pr_info("wsp_ics: %4d MSIs : 0x%06x..0x%06x\n",
msi_count, msi_base,
msi_base + msi_count - 1);
/* Let's check the HW config is sane */
if (ics->lsi_base < ics->hwirq_start ||
(ics->lsi_base + ics->lsi_count) > (ics->hwirq_start + ics->count))
pr_warning("wsp_ics: WARNING ! LSIs out of interrupt-ranges !\n");
if (msi_base < ics->hwirq_start ||
(msi_base + msi_count) > (ics->hwirq_start + ics->count))
pr_warning("wsp_ics: WARNING ! MSIs out of interrupt-ranges !\n");
/* We don't check for overlap between LSI and MSI, which will happen
* if we use the same BUID, I'm not sure yet how legit that is.
*/
rc = wsp_ics_bitmap_setup(ics, dn);
if (rc) {
iounmap(regs);
return rc;
}
ics->dn = of_node_get(dn);
alloc_irq_map(ics);
for (i = 0; i < ics->count; i++)
wsp_mask_real_irq(ics->hwirq_start + i, ics);
ics->ics.map = wsp_ics_map;
ics->ics.mask_unknown = wsp_ics_mask_unknown;
ics->ics.get_server = wsp_ics_get_server;
ics->ics.host_match = wsp_ics_host_match;
xics_register_ics(&ics->ics);
return 0;
}
static void __init wsp_ics_set_default_server(void)
{
struct device_node *np;
u32 hwid;
/* Find the server number for the boot cpu. */
np = of_get_cpu_node(boot_cpuid, NULL);
BUG_ON(!np);
hwid = get_hard_smp_processor_id(boot_cpuid);
pr_info("wsp_ics: default server is %#x, CPU %s\n", hwid, np->full_name);
xics_default_server = hwid;
of_node_put(np);
}
static int __init wsp_ics_init(void)
{
struct device_node *dn;
struct wsp_ics *ics;
int rc, found;
wsp_ics_set_default_server();
found = 0;
for_each_compatible_node(dn, NULL, "ibm,ppc-xics")
found++;
if (found == 0) {
pr_err("wsp_ics: No ICS's found!\n");
return -ENODEV;
}
ics_list = kmalloc(sizeof(*ics) * found, GFP_KERNEL);
if (!ics_list) {
pr_err("wsp_ics: No memory for structs.\n");
return -ENOMEM;
}
num_ics = 0;
ics = ics_list;
for_each_compatible_node(dn, NULL, "ibm,wsp-xics") {
rc = wsp_ics_setup(ics, dn);
if (rc == 0) {
ics++;
num_ics++;
}
}
if (found != num_ics) {
pr_err("wsp_ics: Failed setting up %d ICS's\n",
found - num_ics);
return -1;
}
return 0;
}
void __init wsp_init_irq(void)
{
wsp_ics_init();
xics_init();
/* We need to patch our irq chip's EOI to point to the right ICP */
wsp_irq_chip.irq_eoi = icp_ops->eoi;
}
#ifdef CONFIG_PCI_MSI
static void wsp_ics_msi_unmask_irq(struct irq_data *d)
{
wsp_chip_unmask_irq(d);
unmask_msi_irq(d);
}
static unsigned int wsp_ics_msi_startup(struct irq_data *d)
{
wsp_ics_msi_unmask_irq(d);
return 0;
}
static void wsp_ics_msi_mask_irq(struct irq_data *d)
{
mask_msi_irq(d);
wsp_chip_mask_irq(d);
}
/*
* We do it this way because we reassign the default EOI handling
* in wsp_init_irq() above.
*/
static void wsp_ics_eoi(struct irq_data *data)
{
wsp_irq_chip.irq_eoi(data);
}
static struct irq_chip wsp_ics_msi = {
.name = "WSP ICS MSI",
.irq_startup = wsp_ics_msi_startup,
.irq_mask = wsp_ics_msi_mask_irq,
.irq_unmask = wsp_ics_msi_unmask_irq,
.irq_eoi = wsp_ics_eoi,
.irq_set_affinity = wsp_chip_set_affinity
};
void wsp_ics_set_msi_chip(unsigned int irq)
{
irq_set_chip(irq, &wsp_ics_msi);
}
void wsp_ics_set_std_chip(unsigned int irq)
{
irq_set_chip(irq, &wsp_irq_chip);
}
#endif /* CONFIG_PCI_MSI */
/*
* Copyright 2009 IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef __ICS_H
#define __ICS_H
#define XIVE_ADDR_MASK 0x7FFULL
extern void wsp_init_irq(void);
extern int wsp_ics_alloc_irq(struct device_node *dn, int num);
extern void wsp_ics_free_irq(struct device_node *dn, unsigned int irq);
#ifdef CONFIG_PCI_MSI
extern void wsp_ics_set_msi_chip(unsigned int irq);
extern void wsp_ics_set_std_chip(unsigned int irq);
#endif /* CONFIG_PCI_MSI */
#endif /* __ICS_H */
/*
* Copyright 2011 Michael Ellerman, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include "msi.h"
#include "ics.h"
#include "wsp_pci.h"
/* Magic addresses for 32 & 64-bit MSIs with hardcoded MVE 0 */
#define MSI_ADDR_32 0xFFFF0000ul
#define MSI_ADDR_64 0x1000000000000000ul
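/*
 * Net effect (sketch, derived from the code below): a 64-bit capable
 * function is programmed with address MSI_ADDR_64 and a 32-bit one with
 * MSI_ADDR_32, while the MSI data is just the hwirq masked with
 * XIVE_ADDR_MASK, so the single catch-all MVE can route every message
 * by its low 11 bits.
 */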
int wsp_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
struct pci_controller *phb;
struct msi_desc *entry;
struct msi_msg msg;
unsigned int virq;
int hwirq;
phb = pci_bus_to_host(dev->bus);
if (!phb)
return -ENOENT;
entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
if (entry->msi_attrib.is_64) {
msg.address_lo = 0;
msg.address_hi = MSI_ADDR_64 >> 32;
} else {
msg.address_lo = MSI_ADDR_32;
msg.address_hi = 0;
}
list_for_each_entry(entry, &dev->msi_list, list) {
hwirq = wsp_ics_alloc_irq(phb->dn, 1);
if (hwirq < 0) {
dev_warn(&dev->dev, "wsp_msi: hwirq alloc failed!\n");
return hwirq;
}
virq = irq_create_mapping(NULL, hwirq);
if (virq == NO_IRQ) {
dev_warn(&dev->dev, "wsp_msi: virq alloc failed!\n");
return -1;
}
dev_dbg(&dev->dev, "wsp_msi: allocated irq %#x/%#x\n",
hwirq, virq);
wsp_ics_set_msi_chip(virq);
irq_set_msi_desc(virq, entry);
msg.data = hwirq & XIVE_ADDR_MASK;
write_msi_msg(virq, &msg);
}
return 0;
}
void wsp_teardown_msi_irqs(struct pci_dev *dev)
{
struct pci_controller *phb;
struct msi_desc *entry;
int hwirq;
phb = pci_bus_to_host(dev->bus);
dev_dbg(&dev->dev, "wsp_msi: tearing down msi irqs\n");
list_for_each_entry(entry, &dev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
irq_set_msi_desc(entry->irq, NULL);
wsp_ics_set_std_chip(entry->irq);
hwirq = virq_to_hw(entry->irq);
/* In this order to avoid racing with irq_create_mapping() */
irq_dispose_mapping(entry->irq);
wsp_ics_free_irq(phb->dn, hwirq);
}
}
void wsp_setup_phb_msi(struct pci_controller *phb)
{
/* Create a single MVE at offset 0 that matches everything */
out_be64(phb->cfg_data + PCIE_REG_IODA_ADDR, PCIE_REG_IODA_AD_TBL_MVT);
out_be64(phb->cfg_data + PCIE_REG_IODA_DATA0, 1ull << 63);
ppc_md.setup_msi_irqs = wsp_setup_msi_irqs;
ppc_md.teardown_msi_irqs = wsp_teardown_msi_irqs;
}
/*
* Copyright 2011 Michael Ellerman, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef __WSP_MSI_H
#define __WSP_MSI_H
#ifdef CONFIG_PCI_MSI
extern void wsp_setup_phb_msi(struct pci_controller *phb);
#else
static inline void wsp_setup_phb_msi(struct pci_controller *phb) { }
#endif
#endif /* __WSP_MSI_H */
/*
* IBM Onboard Peripheral Bus Interrupt Controller
*
* Copyright 2010 Jack Miller, IBM Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/reg_a2.h>
#include <asm/irq.h>
#define OPB_NR_IRQS 32
#define OPB_MLSASIER 0x04 /* MLS Accumulated Status IER */
#define OPB_MLSIR 0x50 /* MLS Interrupt Register */
#define OPB_MLSIER 0x54 /* MLS Interrupt Enable Register */
#define OPB_MLSIPR 0x58 /* MLS Interrupt Polarity Register */
#define OPB_MLSIIR 0x5c /* MLS Interrupt Inputs Register */
static int opb_index = 0;
struct opb_pic {
struct irq_domain *host;
void *regs;
int index;
spinlock_t lock;
};
static u32 opb_in(struct opb_pic *opb, int offset)
{
return in_be32(opb->regs + offset);
}
static void opb_out(struct opb_pic *opb, int offset, u32 val)
{
out_be32(opb->regs + offset, val);
}
static void opb_unmask_irq(struct irq_data *d)
{
struct opb_pic *opb;
unsigned long flags;
u32 ier, bitset;
opb = d->chip_data;
bitset = (1 << (31 - irqd_to_hwirq(d)));
spin_lock_irqsave(&opb->lock, flags);
ier = opb_in(opb, OPB_MLSIER);
opb_out(opb, OPB_MLSIER, ier | bitset);
ier = opb_in(opb, OPB_MLSIER);
spin_unlock_irqrestore(&opb->lock, flags);
}
static void opb_mask_irq(struct irq_data *d)
{
struct opb_pic *opb;
unsigned long flags;
u32 ier, mask;
opb = d->chip_data;
mask = ~(1 << (31 - irqd_to_hwirq(d)));
spin_lock_irqsave(&opb->lock, flags);
ier = opb_in(opb, OPB_MLSIER);
opb_out(opb, OPB_MLSIER, ier & mask);
ier = opb_in(opb, OPB_MLSIER); // Flush posted writes
spin_unlock_irqrestore(&opb->lock, flags);
}
static void opb_ack_irq(struct irq_data *d)
{
struct opb_pic *opb;
unsigned long flags;
u32 bitset;
opb = d->chip_data;
bitset = (1 << (31 - irqd_to_hwirq(d)));
spin_lock_irqsave(&opb->lock, flags);
opb_out(opb, OPB_MLSIR, bitset);
opb_in(opb, OPB_MLSIR); // Flush posted writes
spin_unlock_irqrestore(&opb->lock, flags);
}
static void opb_mask_ack_irq(struct irq_data *d)
{
struct opb_pic *opb;
unsigned long flags;
u32 bitset;
u32 ier, ir;
opb = d->chip_data;
bitset = (1 << (31 - irqd_to_hwirq(d)));
spin_lock_irqsave(&opb->lock, flags);
ier = opb_in(opb, OPB_MLSIER);
opb_out(opb, OPB_MLSIER, ier & ~bitset);
ier = opb_in(opb, OPB_MLSIER); // Flush posted writes
opb_out(opb, OPB_MLSIR, bitset);
ir = opb_in(opb, OPB_MLSIR); // Flush posted writes
spin_unlock_irqrestore(&opb->lock, flags);
}
static int opb_set_irq_type(struct irq_data *d, unsigned int flow)
{
struct opb_pic *opb;
unsigned long flags;
int invert, ipr, mask, bit;
opb = d->chip_data;
/* The only information in the type we care about is whether it's a
* high or low trigger. For high-triggered interrupts, the polarity set
* in the MLS Interrupt Polarity Register is 0, for low-triggered
* interrupts it's 1, so that the proper input in the MLS Interrupt
* Input Register is interpreted as asserting the interrupt. */
switch (flow) {
case IRQ_TYPE_NONE:
opb_mask_irq(d);
return 0;
case IRQ_TYPE_LEVEL_HIGH:
invert = 0;
break;
case IRQ_TYPE_LEVEL_LOW:
invert = 1;
break;
default:
return -EINVAL;
}
bit = (1 << (31 - irqd_to_hwirq(d)));
mask = ~bit;
spin_lock_irqsave(&opb->lock, flags);
ipr = opb_in(opb, OPB_MLSIPR);
ipr = (ipr & mask) | (invert ? bit : 0);
opb_out(opb, OPB_MLSIPR, ipr);
ipr = opb_in(opb, OPB_MLSIPR); // Flush posted writes
spin_unlock_irqrestore(&opb->lock, flags);
/* Record the type in the interrupt descriptor */
irqd_set_trigger_type(d, flow);
return 0;
}
static struct irq_chip opb_irq_chip = {
.name = "OPB",
.irq_mask = opb_mask_irq,
.irq_unmask = opb_unmask_irq,
.irq_mask_ack = opb_mask_ack_irq,
.irq_ack = opb_ack_irq,
.irq_set_type = opb_set_irq_type
};
static int opb_host_map(struct irq_domain *host, unsigned int virq,
irq_hw_number_t hwirq)
{
struct opb_pic *opb;
opb = host->host_data;
/* Most of the important stuff is handled by the generic host code, like
* the lookup, so just attach some info to the virtual irq */
irq_set_chip_data(virq, opb);
irq_set_chip_and_handler(virq, &opb_irq_chip, handle_level_irq);
irq_set_irq_type(virq, IRQ_TYPE_NONE);
return 0;
}
static const struct irq_domain_ops opb_host_ops = {
.map = opb_host_map,
.xlate = irq_domain_xlate_twocell,
};
irqreturn_t opb_irq_handler(int irq, void *private)
{
struct opb_pic *opb;
u32 ir, src, subvirq;
opb = (struct opb_pic *) private;
/* Read the OPB MLS Interrupt Register for
* asserted interrupts */
ir = opb_in(opb, OPB_MLSIR);
if (!ir)
return IRQ_NONE;
do {
/* Get the source number (0-31), *NOT* a bit mask */
src = 32 - ffs(ir);
/* Translate from the OPB's conception of interrupt number to
* Linux's virtual IRQ */
subvirq = irq_linear_revmap(opb->host, src);
generic_handle_irq(subvirq);
} while ((ir = opb_in(opb, OPB_MLSIR)));
return IRQ_HANDLED;
}
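/*
 * Bit-numbering sketch (derived from the handler above, values
 * illustrative): the OPB uses big-endian bit numbers, so source 0 lives
 * in the MSB. For ir == 0x80000000, ffs(ir) == 32 and src == 0; for
 * ir == 0x00000001, ffs(ir) == 1 and src == 31.
 */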
struct opb_pic *opb_pic_init_one(struct device_node *dn)
{
struct opb_pic *opb;
struct resource res;
if (of_address_to_resource(dn, 0, &res)) {
printk(KERN_ERR "opb: Couldn't translate resource\n");
return NULL;
}
opb = kzalloc(sizeof(struct opb_pic), GFP_KERNEL);
if (!opb) {
printk(KERN_ERR "opb: Failed to allocate opb struct!\n");
return NULL;
}
/* Get access to the OPB MMIO registers */
opb->regs = ioremap(res.start + 0x10000, 0x1000);
if (!opb->regs) {
printk(KERN_ERR "opb: Failed to allocate register space!\n");
goto free_opb;
}
/* Allocate an irq domain so that Linux knows that despite only
* having one interrupt to issue, we're the controller for multiple
* hardware IRQs, so later we can look up their virtual IRQs. */
opb->host = irq_domain_add_linear(dn, OPB_NR_IRQS, &opb_host_ops, opb);
if (!opb->host) {
printk(KERN_ERR "opb: Failed to allocate IRQ host!\n");
goto free_regs;
}
opb->index = opb_index++;
spin_lock_init(&opb->lock);
/* Disable all interrupts by default */
opb_out(opb, OPB_MLSASIER, 0);
opb_out(opb, OPB_MLSIER, 0);
/* ACK any interrupts left by FW */
opb_out(opb, OPB_MLSIR, 0xFFFFFFFF);
return opb;
free_regs:
iounmap(opb->regs);
free_opb:
kfree(opb);
return NULL;
}
void __init opb_pic_init(void)
{
struct device_node *dn;
struct opb_pic *opb;
int virq;
int rc;
/* Call init_one for each OPB device */
for_each_compatible_node(dn, NULL, "ibm,opb") {
/* Fill in an OPB struct */
opb = opb_pic_init_one(dn);
if (!opb) {
printk(KERN_WARNING "opb: Failed to init node, skipped!\n");
continue;
}
/* Map / get opb's hardware virtual irq */
virq = irq_of_parse_and_map(dn, 0);
if (virq <= 0) {
printk("opb: irq_op_parse_and_map failed!\n");
continue;
}
/* Attach opb interrupt handler to new virtual IRQ */
rc = request_irq(virq, opb_irq_handler, IRQF_NO_THREAD,
"OPB LS Cascade", opb);
if (rc) {
printk("opb: request_irq failed: %d\n", rc);
continue;
}
printk("OPB%d init with %d IRQs at %p\n", opb->index,
OPB_NR_IRQS, opb->regs);
}
}
/*
* Copyright 2008-2011, IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <linux/time.h>
#include <linux/of_fdt.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include "ics.h"
#include "wsp.h"
static void psr2_spin(void)
{
hard_irq_disable();
for (;;)
continue;
}
static void psr2_restart(char *cmd)
{
psr2_spin();
}
static int __init psr2_probe(void)
{
unsigned long root = of_get_flat_dt_root();
if (of_flat_dt_is_compatible(root, "ibm,wsp-chroma")) {
/* chroma systems also claim they are psr2s */
return 0;
}
if (!of_flat_dt_is_compatible(root, "ibm,psr2"))
return 0;
return 1;
}
define_machine(psr2_md) {
.name = "PSR2 A2",
.probe = psr2_probe,
.setup_arch = wsp_setup_arch,
.restart = psr2_restart,
.power_off = psr2_spin,
.halt = psr2_spin,
.calibrate_decr = generic_calibrate_decr,
.init_IRQ = wsp_setup_irq,
.progress = udbg_progress,
.power_save = book3e_idle,
};
machine_arch_initcall(psr2_md, wsp_probe_devices);
/*
* SCOM support for A2 platforms
*
* Copyright 2007-2011 Benjamin Herrenschmidt, David Gibson,
* Michael Ellerman, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/cputhreads.h>
#include <asm/reg_a2.h>
#include <asm/scom.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>
#include "wsp.h"
#define SCOM_RAMC 0x2a /* Ram Command */
#define SCOM_RAMC_TGT1_EXT 0x80000000
#define SCOM_RAMC_SRC1_EXT 0x40000000
#define SCOM_RAMC_SRC2_EXT 0x20000000
#define SCOM_RAMC_SRC3_EXT 0x10000000
#define SCOM_RAMC_ENABLE 0x00080000
#define SCOM_RAMC_THREADSEL 0x00060000
#define SCOM_RAMC_EXECUTE 0x00010000
#define SCOM_RAMC_MSR_OVERRIDE 0x00008000
#define SCOM_RAMC_MSR_PR 0x00004000
#define SCOM_RAMC_MSR_GS 0x00002000
#define SCOM_RAMC_FORCE 0x00001000
#define SCOM_RAMC_FLUSH 0x00000800
#define SCOM_RAMC_INTERRUPT 0x00000004
#define SCOM_RAMC_ERROR 0x00000002
#define SCOM_RAMC_DONE 0x00000001
#define SCOM_RAMI 0x29 /* Ram Instruction */
#define SCOM_RAMIC 0x28 /* Ram Instruction and Command */
#define SCOM_RAMIC_INSN 0xffffffff00000000
#define SCOM_RAMD 0x2d /* Ram Data */
#define SCOM_RAMDH 0x2e /* Ram Data High */
#define SCOM_RAMDL 0x2f /* Ram Data Low */
#define SCOM_PCCR0 0x33 /* PC Configuration Register 0 */
#define SCOM_PCCR0_ENABLE_DEBUG 0x80000000
#define SCOM_PCCR0_ENABLE_RAM 0x40000000
#define SCOM_THRCTL 0x30 /* Thread Control and Status */
#define SCOM_THRCTL_T0_STOP 0x80000000
#define SCOM_THRCTL_T1_STOP 0x40000000
#define SCOM_THRCTL_T2_STOP 0x20000000
#define SCOM_THRCTL_T3_STOP 0x10000000
#define SCOM_THRCTL_T0_STEP 0x08000000
#define SCOM_THRCTL_T1_STEP 0x04000000
#define SCOM_THRCTL_T2_STEP 0x02000000
#define SCOM_THRCTL_T3_STEP 0x01000000
#define SCOM_THRCTL_T0_RUN 0x00800000
#define SCOM_THRCTL_T1_RUN 0x00400000
#define SCOM_THRCTL_T2_RUN 0x00200000
#define SCOM_THRCTL_T3_RUN 0x00100000
#define SCOM_THRCTL_T0_PM 0x00080000
#define SCOM_THRCTL_T1_PM 0x00040000
#define SCOM_THRCTL_T2_PM 0x00020000
#define SCOM_THRCTL_T3_PM 0x00010000
#define SCOM_THRCTL_T0_UDE 0x00008000
#define SCOM_THRCTL_T1_UDE 0x00004000
#define SCOM_THRCTL_T2_UDE 0x00002000
#define SCOM_THRCTL_T3_UDE 0x00001000
#define SCOM_THRCTL_ASYNC_DIS 0x00000800
#define SCOM_THRCTL_TB_DIS 0x00000400
#define SCOM_THRCTL_DEC_DIS 0x00000200
#define SCOM_THRCTL_AND 0x31 /* Thread Control and Status */
#define SCOM_THRCTL_OR 0x32 /* Thread Control and Status */
static DEFINE_PER_CPU(scom_map_t, scom_ptrs);
static scom_map_t get_scom(int cpu, struct device_node *np, int *first_thread)
{
scom_map_t scom = per_cpu(scom_ptrs, cpu);
int tcpu;
if (scom_map_ok(scom)) {
*first_thread = 0;
return scom;
}
*first_thread = 1;
scom = scom_map_device(np, 0);
for (tcpu = cpu_first_thread_sibling(cpu);
tcpu <= cpu_last_thread_sibling(cpu); tcpu++)
per_cpu(scom_ptrs, tcpu) = scom;
/* Hack: for the boot core, this will actually get called on
* the second thread up, not the first, so our test above will
* set first_thread incorrectly. */
if (cpu_first_thread_sibling(cpu) == 0)
*first_thread = 0;
return scom;
}
static int a2_scom_ram(scom_map_t scom, int thread, u32 insn, int extmask)
{
u64 cmd, mask, val;
int n = 0;
cmd = ((u64)insn << 32) | (((u64)extmask & 0xf) << 28)
| ((u64)thread << 17) | SCOM_RAMC_ENABLE | SCOM_RAMC_EXECUTE;
mask = SCOM_RAMC_DONE | SCOM_RAMC_INTERRUPT | SCOM_RAMC_ERROR;
scom_write(scom, SCOM_RAMIC, cmd);
for (;;) {
if (scom_read(scom, SCOM_RAMC, &val) != 0) {
pr_err("SCOM error on instruction 0x%08x, thread %d\n",
insn, thread);
return -1;
}
if (val & mask)
break;
pr_devel("Waiting on RAMC = 0x%llx\n", val);
if (++n == 3) {
pr_err("RAMC timeout on instruction 0x%08x, thread %d\n",
insn, thread);
return -1;
}
}
if (val & SCOM_RAMC_INTERRUPT) {
pr_err("RAMC interrupt on instruction 0x%08x, thread %d\n",
insn, thread);
return -SCOM_RAMC_INTERRUPT;
}
if (val & SCOM_RAMC_ERROR) {
pr_err("RAMC error on instruction 0x%08x, thread %d\n",
insn, thread);
return -SCOM_RAMC_ERROR;
}
return 0;
}
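/*
 * RAMIC command layout implied by the composition above (sketch):
 *
 *   bits 63-32 : instruction image to be rammed
 *   bits 31-28 : TGT1/SRC1/SRC2/SRC3 extension bits (extmask)
 *   bits 18-17 : thread select (SCOM_RAMC_THREADSEL)
 *   low word   : SCOM_RAMC_ENABLE | SCOM_RAMC_EXECUTE
 */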
static int a2_scom_getgpr(scom_map_t scom, int thread, int gpr, int alt,
u64 *out_gpr)
{
int rc;
/* or rN, rN, rN */
u32 insn = 0x7c000378 | (gpr << 21) | (gpr << 16) | (gpr << 11);
rc = a2_scom_ram(scom, thread, insn, alt ? 0xf : 0x0);
if (rc)
return rc;
return scom_read(scom, SCOM_RAMD, out_gpr);
}
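/*
 * Encoding check (illustrative): for gpr == 1 the constructed opcode is
 * 0x7c000378 | (1 << 21) | (1 << 16) | (1 << 11) == 0x7c210b78, i.e.
 * "or r1,r1,r1" (mr r1,r1) - a no-op whose only effect under RAM mode
 * is to expose r1 on SCOM_RAMD.
 */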
static int a2_scom_getspr(scom_map_t scom, int thread, int spr, u64 *out_spr)
{
int rc, sprhi, sprlo;
u32 insn;
sprhi = spr >> 5;
sprlo = spr & 0x1f;
insn = 0x7c2002a6 | (sprlo << 16) | (sprhi << 11); /* mfspr r1,spr */
if (spr == 0x0ff0)
insn = 0x7c2000a6; /* mfmsr r1 */
rc = a2_scom_ram(scom, thread, insn, 0xf);
if (rc)
return rc;
return a2_scom_getgpr(scom, thread, 1, 1, out_spr);
}
static int a2_scom_setgpr(scom_map_t scom, int thread, int gpr,
int alt, u64 val)
{
u32 lis = 0x3c000000 | (gpr << 21);
u32 li = 0x38000000 | (gpr << 21);
u32 oris = 0x64000000 | (gpr << 21) | (gpr << 16);
u32 ori = 0x60000000 | (gpr << 21) | (gpr << 16);
u32 rldicr32 = 0x780007c6 | (gpr << 21) | (gpr << 16);
u32 highest = val >> 48;
u32 higher = (val >> 32) & 0xffff;
u32 high = (val >> 16) & 0xffff;
u32 low = val & 0xffff;
int lext = alt ? 0x8 : 0x0;
int oext = alt ? 0xf : 0x0;
int rc = 0;
if (highest)
rc |= a2_scom_ram(scom, thread, lis | highest, lext);
if (higher) {
if (highest)
rc |= a2_scom_ram(scom, thread, oris | higher, oext);
else
rc |= a2_scom_ram(scom, thread, li | higher, lext);
}
if (highest || higher)
rc |= a2_scom_ram(scom, thread, rldicr32, oext);
if (high) {
if (highest || higher)
rc |= a2_scom_ram(scom, thread, oris | high, oext);
else
rc |= a2_scom_ram(scom, thread, lis | high, lext);
}
if (highest || higher || high)
rc |= a2_scom_ram(scom, thread, ori | low, oext);
else
rc |= a2_scom_ram(scom, thread, li | low, lext);
return rc;
}
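/*
 * Illustrative sequence (value assumed): loading 0x0123456789abcdef
 * into r1 via the helper above RAMs, in order:
 *   lis r1,0x0123 ; oris r1,r1,0x4567 ; rldicr r1,r1,32,31 ;
 *   oris r1,r1,0x89ab ; ori r1,r1,0xcdef
 */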
static int a2_scom_setspr(scom_map_t scom, int thread, int spr, u64 val)
{
int sprhi = spr >> 5;
int sprlo = spr & 0x1f;
/* mtspr spr, r1 */
u32 insn = 0x7c2003a6 | (sprlo << 16) | (sprhi << 11);
if (spr == 0x0ff0)
insn = 0x7c200124; /* mtmsr r1 */
if (a2_scom_setgpr(scom, thread, 1, 1, val))
return -1;
return a2_scom_ram(scom, thread, insn, 0xf);
}
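/*
 * Encoding check (illustrative): mtspr encodes the SPR number as two
 * 5-bit halves with the low half first, which is what the sprlo/sprhi
 * swap above implements. E.g. for SRR0 (SPR 26 == 0x1a): sprhi == 0,
 * sprlo == 0x1a, so insn == 0x7c2003a6 | (0x1a << 16) == 0x7c3a03a6,
 * i.e. "mtspr 26,r1".
 */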
static int a2_scom_initial_tlb(scom_map_t scom, int thread)
{
extern u32 a2_tlbinit_code_start[], a2_tlbinit_code_end[];
extern u32 a2_tlbinit_after_iprot_flush[];
extern u32 a2_tlbinit_after_linear_map[];
u32 assoc, entries, i;
u64 epn, tlbcfg;
u32 *p;
int rc;
/* Invalidate all entries (including iprot) */
rc = a2_scom_getspr(scom, thread, SPRN_TLB0CFG, &tlbcfg);
if (rc)
goto scom_fail;
entries = tlbcfg & TLBnCFG_N_ENTRY;
assoc = (tlbcfg & TLBnCFG_ASSOC) >> 24;
epn = 0;
/* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */
a2_scom_setspr(scom, thread, SPRN_MMUCR2, 0x000a7531);
/* Set MMUCR3 to write all thread-ID (TID) bits to the TLB */
a2_scom_setspr(scom, thread, SPRN_MMUCR3, 0x0000000f);
/* Set MAS1 for 1G page size, and MAS2 to our initial EPN */
a2_scom_setspr(scom, thread, SPRN_MAS1, MAS1_TSIZE(BOOK3E_PAGESZ_1GB));
a2_scom_setspr(scom, thread, SPRN_MAS2, epn);
for (i = 0; i < entries; i++) {
a2_scom_setspr(scom, thread, SPRN_MAS0, MAS0_ESEL(i % assoc));
/* tlbwe */
rc = a2_scom_ram(scom, thread, 0x7c0007a4, 0);
if (rc)
goto scom_fail;
/* Next entry is new address? */
if((i + 1) % assoc == 0) {
epn += (1 << 30);
a2_scom_setspr(scom, thread, SPRN_MAS2, epn);
}
}
/* Setup args for linear mapping */
rc = a2_scom_setgpr(scom, thread, 3, 0, MAS0_TLBSEL(0));
if (rc)
goto scom_fail;
/* Linear mapping */
for (p = a2_tlbinit_code_start; p < a2_tlbinit_after_linear_map; p++) {
rc = a2_scom_ram(scom, thread, *p, 0);
if (rc)
goto scom_fail;
}
/*
* For the boot thread, between the linear mapping and the debug
* mappings there is a loop to flush iprot mappings. Ramming doesn't do
* branches, but the secondary threads don't need to be nearly as smart
* (i.e. we don't need to worry about invalidating the mapping we're
* standing on).
*/
/* Debug mappings. Expects r11 = MAS0 from linear map (set above) */
for (p = a2_tlbinit_after_iprot_flush; p < a2_tlbinit_code_end; p++) {
rc = a2_scom_ram(scom, thread, *p, 0);
if (rc)
goto scom_fail;
}
scom_fail:
if (rc)
pr_err("Setting up initial TLB failed, err %d\n", rc);
if (rc == -SCOM_RAMC_INTERRUPT) {
/* Interrupt, dump some status */
int rc[10];
u64 iar, srr0, srr1, esr, mas0, mas1, mas2, mas7_3, mas8, ccr2;
rc[0] = a2_scom_getspr(scom, thread, SPRN_IAR, &iar);
rc[1] = a2_scom_getspr(scom, thread, SPRN_SRR0, &srr0);
rc[2] = a2_scom_getspr(scom, thread, SPRN_SRR1, &srr1);
rc[3] = a2_scom_getspr(scom, thread, SPRN_ESR, &esr);
rc[4] = a2_scom_getspr(scom, thread, SPRN_MAS0, &mas0);
rc[5] = a2_scom_getspr(scom, thread, SPRN_MAS1, &mas1);
rc[6] = a2_scom_getspr(scom, thread, SPRN_MAS2, &mas2);
rc[7] = a2_scom_getspr(scom, thread, SPRN_MAS7_MAS3, &mas7_3);
rc[8] = a2_scom_getspr(scom, thread, SPRN_MAS8, &mas8);
rc[9] = a2_scom_getspr(scom, thread, SPRN_A2_CCR2, &ccr2);
pr_err(" -> retreived IAR =0x%llx (err %d)\n", iar, rc[0]);
pr_err(" retreived SRR0=0x%llx (err %d)\n", srr0, rc[1]);
pr_err(" retreived SRR1=0x%llx (err %d)\n", srr1, rc[2]);
pr_err(" retreived ESR =0x%llx (err %d)\n", esr, rc[3]);
pr_err(" retreived MAS0=0x%llx (err %d)\n", mas0, rc[4]);
pr_err(" retreived MAS1=0x%llx (err %d)\n", mas1, rc[5]);
pr_err(" retreived MAS2=0x%llx (err %d)\n", mas2, rc[6]);
pr_err(" retreived MS73=0x%llx (err %d)\n", mas7_3, rc[7]);
pr_err(" retreived MAS8=0x%llx (err %d)\n", mas8, rc[8]);
pr_err(" retreived CCR2=0x%llx (err %d)\n", ccr2, rc[9]);
}
return rc;
}
int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx, struct device_node *np)
{
u64 init_iar, init_msr, init_ccr2;
unsigned long start_here;
int rc, core_setup;
scom_map_t scom;
u64 pccr0;
scom = get_scom(lcpu, np, &core_setup);
if (!scom) {
printk(KERN_ERR "Couldn't map SCOM for CPU%d\n", lcpu);
return -1;
}
pr_devel("Bringing up CPU%d using SCOM...\n", lcpu);
if (scom_read(scom, SCOM_PCCR0, &pccr0) != 0) {
printk(KERN_ERR "XSCOM failure readng PCCR0 on CPU%d\n", lcpu);
return -1;
}
scom_write(scom, SCOM_PCCR0, pccr0 | SCOM_PCCR0_ENABLE_DEBUG |
SCOM_PCCR0_ENABLE_RAM);
/* Stop the thread with THRCTL. If we are setting up the TLB we stop all
* threads. We also disable asynchronous interrupts while RAMing.
*/
if (core_setup)
scom_write(scom, SCOM_THRCTL_OR,
SCOM_THRCTL_T0_STOP |
SCOM_THRCTL_T1_STOP |
SCOM_THRCTL_T2_STOP |
SCOM_THRCTL_T3_STOP |
SCOM_THRCTL_ASYNC_DIS);
else
scom_write(scom, SCOM_THRCTL_OR, SCOM_THRCTL_T0_STOP >> thr_idx);
/* Flush its pipeline just in case */
scom_write(scom, SCOM_RAMC, ((u64)thr_idx << 17) |
SCOM_RAMC_FLUSH | SCOM_RAMC_ENABLE);
a2_scom_getspr(scom, thr_idx, SPRN_IAR, &init_iar);
a2_scom_getspr(scom, thr_idx, 0x0ff0, &init_msr);
a2_scom_getspr(scom, thr_idx, SPRN_A2_CCR2, &init_ccr2);
/* Set MSR to MSR_CM (0x0ff0 is magic value for MSR_CM) */
rc = a2_scom_setspr(scom, thr_idx, 0x0ff0, MSR_CM);
if (rc) {
pr_err("Failed to set MSR ! err %d\n", rc);
return rc;
}
/* RAM in a sync/isync pair for good measure */
a2_scom_ram(scom, thr_idx, 0x7c0004ac, 0);
a2_scom_ram(scom, thr_idx, 0x4c00012c, 0);
if (core_setup) {
pr_devel("CPU%d is first thread in core, initializing TLB...\n",
lcpu);
rc = a2_scom_initial_tlb(scom, thr_idx);
if (rc)
goto fail;
}
start_here = ppc_function_entry(core_setup ? generic_secondary_smp_init
: generic_secondary_thread_init);
pr_devel("CPU%d entry point at 0x%lx...\n", lcpu, start_here);
rc |= a2_scom_setspr(scom, thr_idx, SPRN_IAR, start_here);
rc |= a2_scom_setgpr(scom, thr_idx, 3, 0,
get_hard_smp_processor_id(lcpu));
/*
* Tell book3e_secondary_core_init not to set up the TLB, we've
* already done that.
*/
rc |= a2_scom_setgpr(scom, thr_idx, 4, 0, 1);
rc |= a2_scom_setspr(scom, thr_idx, SPRN_TENS, 0x1 << thr_idx);
scom_write(scom, SCOM_RAMC, 0);
scom_write(scom, SCOM_THRCTL_AND, ~(SCOM_THRCTL_T0_STOP >> thr_idx));
scom_write(scom, SCOM_PCCR0, pccr0);
fail:
pr_devel(" SCOM initialization %s\n", rc ? "failed" : "succeeded");
if (rc) {
pr_err("Old IAR=0x%08llx MSR=0x%08llx CCR2=0x%08llx\n",
init_iar, init_msr, init_ccr2);
}
return rc;
}
/*
* SCOM backend for WSP
*
* Copyright 2010 Benjamin Herrenschmidt, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/of_address.h>
#include <asm/cputhreads.h>
#include <asm/reg_a2.h>
#include <asm/scom.h>
#include <asm/udbg.h>
#include "wsp.h"
static scom_map_t wsp_scom_map(struct device_node *dev, u64 reg, u64 count)
{
struct resource r;
u64 xscom_addr;
if (!of_get_property(dev, "scom-controller", NULL)) {
pr_err("%s: device %s is not a SCOM controller\n",
__func__, dev->full_name);
return SCOM_MAP_INVALID;
}
if (of_address_to_resource(dev, 0, &r)) {
pr_debug("Failed to find SCOM controller address\n");
return 0;
}
/* Transform the SCOM address into an XSCOM offset */
xscom_addr = ((reg & 0x7f000000) >> 1) | ((reg & 0xfffff) << 3);
return (scom_map_t)ioremap(r.start + xscom_addr, count << 3);
}
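/*
 * Worked address transform (register value assumed): for reg ==
 * 0x02010809 the line above computes
 *   xscom_addr = (0x02000000 >> 1) | (0x10809 << 3)
 *              = 0x01000000 | 0x84048 = 0x01084048
 * which is then added to the controller's MMIO base.
 */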
static void wsp_scom_unmap(scom_map_t map)
{
iounmap((void *)map);
}
static int wsp_scom_read(scom_map_t map, u64 reg, u64 *value)
{
u64 __iomem *addr = (u64 __iomem *)map;
*value = in_be64(addr + reg);
return 0;
}
static int wsp_scom_write(scom_map_t map, u64 reg, u64 value)
{
u64 __iomem *addr = (u64 __iomem *)map;
out_be64(addr + reg, value);
return 0;
}
static const struct scom_controller wsp_scom_controller = {
.map = wsp_scom_map,
.unmap = wsp_scom_unmap,
.read = wsp_scom_read,
.write = wsp_scom_write
};
void scom_init_wsp(void)
{
scom_init(&wsp_scom_controller);
}
/*
* Copyright 2010 Michael Ellerman, IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/of_platform.h>
#include "wsp.h"
/*
* Find chip-id by walking up device tree looking for ibm,wsp-chip-id property.
* Won't work for nodes that are not a descendant of a wsp node.
*/
int wsp_get_chip_id(struct device_node *dn)
{
const u32 *p;
int rc;
/* Start looking at the specified node, not its parent */
dn = of_node_get(dn);
while (dn && !(p = of_get_property(dn, "ibm,wsp-chip-id", NULL)))
dn = of_get_next_parent(dn);
if (!dn)
return -1;
rc = *p;
of_node_put(dn);
return rc;
}
/*
* SMP Support for A2 platforms
*
* Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <asm/dbell.h>
#include <asm/machdep.h>
#include <asm/xics.h>
#include "ics.h"
#include "wsp.h"
static void smp_a2_setup_cpu(int cpu)
{
doorbell_setup_this_cpu();
if (cpu != boot_cpuid)
xics_setup_cpu();
}
int smp_a2_kick_cpu(int nr)
{
const char *enable_method;
struct device_node *np;
int thr_idx;
if (nr < 0 || nr >= NR_CPUS)
return -ENOENT;
np = of_get_cpu_node(nr, &thr_idx);
if (!np)
return -ENODEV;
enable_method = of_get_property(np, "enable-method", NULL);
pr_devel("CPU%d has enable-method: \"%s\"\n", nr, enable_method);
if (!enable_method) {
printk(KERN_ERR "CPU%d has no enable-method\n", nr);
return -ENOENT;
} else if (strcmp(enable_method, "ibm,a2-scom") == 0) {
if (a2_scom_startup_cpu(nr, thr_idx, np))
return -1;
} else {
printk(KERN_ERR "CPU%d: Don't understand enable-method \"%s\"\n",
nr, enable_method);
return -EINVAL;
}
/*
* The processor is currently spinning, waiting for the
* cpu_start field to become non-zero. After we set cpu_start,
* the processor will continue on to secondary_start.
*/
paca[nr].cpu_start = 1;
return 0;
}
static int __init smp_a2_probe(void)
{
return num_possible_cpus();
}
static struct smp_ops_t a2_smp_ops = {
.message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
.cause_ipi = doorbell_cause_ipi,
.probe = smp_a2_probe,
.kick_cpu = smp_a2_kick_cpu,
.setup_cpu = smp_a2_setup_cpu,
};
void __init a2_setup_smp(void)
{
smp_ops = &a2_smp_ops;
}
/*
* Copyright 2008-2011, IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/of_address.h>
#include <asm/scom.h>
#include "wsp.h"
#include "ics.h"
#define WSP_SOC_COMPATIBLE "ibm,wsp-soc"
#define PBIC_COMPATIBLE "ibm,wsp-pbic"
#define COPRO_COMPATIBLE "ibm,wsp-coprocessor"
static int __init wsp_probe_buses(void)
{
static __initdata struct of_device_id bus_ids[] = {
/*
* every node in between needs to be here or you won't
* find it
*/
{ .compatible = WSP_SOC_COMPATIBLE, },
{ .compatible = PBIC_COMPATIBLE, },
{ .compatible = COPRO_COMPATIBLE, },
{},
};
of_platform_bus_probe(NULL, bus_ids, NULL);
return 0;
}
void __init wsp_setup_arch(void)
{
/* init to some ~sane value until calibrate_delay() runs */
loops_per_jiffy = 50000000;
scom_init_wsp();
/* Setup SMP callback */
#ifdef CONFIG_SMP
a2_setup_smp();
#endif
#ifdef CONFIG_PCI
wsp_setup_pci();
#endif
}
void __init wsp_setup_irq(void)
{
wsp_init_irq();
opb_pic_init();
}
int __init wsp_probe_devices(void)
{
struct device_node *np;
/* Our RTC is a ds1500. It seems to be programmatically compatible
* with the ds1511, for which we have a driver, so let's use that.
*/
np = of_find_compatible_node(NULL, NULL, "dallas,ds1500");
if (np != NULL) {
struct resource res;
if (of_address_to_resource(np, 0, &res) == 0)
platform_device_register_simple("ds1511", 0, &res, 1);
}
wsp_probe_buses();
return 0;
}
void wsp_halt(void)
{
u64 val;
scom_map_t m;
struct device_node *dn;
struct device_node *mine;
struct device_node *me;
int rc;
me = of_get_cpu_node(smp_processor_id(), NULL);
mine = scom_find_parent(me);
/* This will halt all the A2s but not power off the chip */
for_each_node_with_property(dn, "scom-controller") {
if (dn == mine)
continue;
m = scom_map(dn, 0, 1);
/* read-modify-write it so the HW probe does not get
* confused */
rc = scom_read(m, 0, &val);
if (rc == 0)
scom_write(m, 0, val | 1);
scom_unmap(m);
}
m = scom_map(mine, 0, 1);
rc = scom_read(m, 0, &val);
if (rc == 0)
scom_write(m, 0, val | 1);
/* should never return */
scom_unmap(m);
}
#ifndef __WSP_H
#define __WSP_H
#include <asm/wsp.h>
/* Devtree compatible strings for major devices */
#define PCIE_COMPATIBLE "ibm,wsp-pciex"
extern void wsp_setup_arch(void);
extern void wsp_setup_irq(void);
extern int wsp_probe_devices(void);
extern void wsp_halt(void);
extern void wsp_setup_pci(void);
extern void scom_init_wsp(void);
extern void a2_setup_smp(void);
extern int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx,
struct device_node *np);
extern int smp_a2_kick_cpu(int nr);
extern void opb_pic_init(void);
/* Chroma-specific management */
extern void wsp_h8_restart(char *cmd);
extern void wsp_h8_power_off(void);
extern void __init wsp_setup_h8(void);
#endif /* __WSP_H */
/*
* Copyright 2010 Ben Herrenschmidt, IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef __WSP_PCI_H
#define __WSP_PCI_H
/* Architected registers */
#define PCIE_REG_DMA_CHAN_STATUS 0x110
#define PCIE_REG_CPU_LOADSTORE_STATUS 0x120
#define PCIE_REG_CONFIG_DATA 0x130
#define PCIE_REG_LOCK0 0x138
#define PCIE_REG_CONFIG_ADDRESS 0x140
#define PCIE_REG_CA_ENABLE 0x8000000000000000ull
#define PCIE_REG_CA_BUS_MASK 0x0ff0000000000000ull
#define PCIE_REG_CA_BUS_SHIFT (20+32)
#define PCIE_REG_CA_DEV_MASK 0x000f800000000000ull
#define PCIE_REG_CA_DEV_SHIFT (15+32)
#define PCIE_REG_CA_FUNC_MASK 0x0000700000000000ull
#define PCIE_REG_CA_FUNC_SHIFT (12+32)
#define PCIE_REG_CA_REG_MASK 0x00000fff00000000ull
#define PCIE_REG_CA_REG_SHIFT ( 0+32)
#define PCIE_REG_CA_BE_MASK 0x00000000f0000000ull
#define PCIE_REG_CA_BE_SHIFT ( 28)
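/* Hypothetical helper (a sketch, not part of the original source):
 * composing a config address from the architected fields above would
 * look like this.
 */
static inline u64 wsp_pcie_cfg_addr(u8 bus, u8 dev, u8 fn, u16 reg)
{
	return PCIE_REG_CA_ENABLE |
	       ((u64)(bus & 0xff) << PCIE_REG_CA_BUS_SHIFT) |
	       ((u64)(dev & 0x1f) << PCIE_REG_CA_DEV_SHIFT) |
	       ((u64)(fn & 0x7) << PCIE_REG_CA_FUNC_SHIFT) |
	       ((u64)(reg & 0xfff) << PCIE_REG_CA_REG_SHIFT);
}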
#define PCIE_REG_LOCK1 0x148
#define PCIE_REG_PHB_CONFIG 0x160
#define PCIE_REG_PHBC_64B_TCE_EN 0x2000000000000000ull
#define PCIE_REG_PHBC_MMIO_DMA_FREEZE_EN 0x1000000000000000ull
#define PCIE_REG_PHBC_32BIT_MSI_EN 0x0080000000000000ull
#define PCIE_REG_PHBC_M64_EN 0x0040000000000000ull
#define PCIE_REG_PHBC_IO_EN 0x0008000000000000ull
#define PCIE_REG_PHBC_64BIT_MSI_EN 0x0002000000000000ull
#define PCIE_REG_PHBC_M32A_EN 0x0000800000000000ull
#define PCIE_REG_PHBC_M32B_EN 0x0000400000000000ull
#define PCIE_REG_PHBC_MSI_PE_VALIDATE 0x0000200000000000ull
#define PCIE_REG_PHBC_DMA_XLATE_BYPASS 0x0000100000000000ull
#define PCIE_REG_IO_BASE_ADDR 0x170
#define PCIE_REG_IO_BASE_MASK 0x178
#define PCIE_REG_IO_START_ADDR 0x180
#define PCIE_REG_M32A_BASE_ADDR 0x190
#define PCIE_REG_M32A_BASE_MASK 0x198
#define PCIE_REG_M32A_START_ADDR 0x1a0
#define PCIE_REG_M32B_BASE_ADDR 0x1b0
#define PCIE_REG_M32B_BASE_MASK 0x1b8
#define PCIE_REG_M32B_START_ADDR 0x1c0
#define PCIE_REG_M64_BASE_ADDR 0x1e0
#define PCIE_REG_M64_BASE_MASK 0x1e8
#define PCIE_REG_M64_START_ADDR 0x1f0
#define PCIE_REG_TCE_KILL 0x210
#define PCIE_REG_TCEKILL_SINGLE 0x8000000000000000ull
#define PCIE_REG_TCEKILL_ADDR_MASK 0x000003fffffffff8ull
#define PCIE_REG_TCEKILL_PS_4K 0
#define PCIE_REG_TCEKILL_PS_64K 1
#define PCIE_REG_TCEKILL_PS_16M 2
#define PCIE_REG_TCEKILL_PS_16G 3
#define PCIE_REG_IODA_ADDR 0x220
#define PCIE_REG_IODA_AD_AUTOINC 0x8000000000000000ull
#define PCIE_REG_IODA_AD_TBL_MVT 0x0005000000000000ull
#define PCIE_REG_IODA_AD_TBL_PELT 0x0006000000000000ull
#define PCIE_REG_IODA_AD_TBL_PESTA 0x0007000000000000ull
#define PCIE_REG_IODA_AD_TBL_PESTB 0x0008000000000000ull
#define PCIE_REG_IODA_AD_TBL_TVT 0x0009000000000000ull
#define PCIE_REG_IODA_AD_TBL_TCE 0x000a000000000000ull
#define PCIE_REG_IODA_DATA0 0x228
#define PCIE_REG_IODA_DATA1 0x230
#define PCIE_REG_LOCK2 0x240
#define PCIE_REG_PHB_GEN_CAP 0x250
#define PCIE_REG_PHB_TCE_CAP 0x258
#define PCIE_REG_PHB_IRQ_CAP 0x260
#define PCIE_REG_PHB_EEH_CAP 0x268
#define PCIE_REG_PAPR_ERR_INJ_CONTROL 0x2b0
#define PCIE_REG_PAPR_ERR_INJ_ADDR 0x2b8
#define PCIE_REG_PAPR_ERR_INJ_MASK 0x2c0
#define PCIE_REG_SYS_CFG1 0x600
#define PCIE_REG_SYS_CFG1_CLASS_CODE 0x0000000000ffffffull
#define IODA_TVT0_TTA_MASK 0x000fffffffff0000ull
#define IODA_TVT0_TTA_SHIFT 4
#define IODA_TVT0_BUSNUM_VALID_MASK 0x000000000000e000ull
#define IODA_TVT0_TCE_TABLE_SIZE_MASK 0x0000000000001f00ull
#define IODA_TVT0_TCE_TABLE_SIZE_SHIFT 8
#define IODA_TVT0_BUSNUM_VALUE_MASK 0x00000000000000ffull
#define IODA_TVT0_BUSNUM_VALID_SHIFT 0
#define IODA_TVT1_DEVNUM_VALID 0x2000000000000000ull
#define IODA_TVT1_DEVNUM_VALUE_MASK 0x1f00000000000000ull
#define IODA_TVT1_DEVNUM_VALUE_SHIFT 56
#define IODA_TVT1_FUNCNUM_VALID 0x0008000000000000ull
#define IODA_TVT1_FUNCNUM_VALUE_MASK 0x0007000000000000ull
#define IODA_TVT1_FUNCNUM_VALUE_SHIFT 48
#define IODA_TVT1_IO_PAGE_SIZE_MASK 0x00001f0000000000ull
#define IODA_TVT1_IO_PAGE_SIZE_SHIFT 40
#define IODA_TVT1_PE_NUMBER_MASK 0x000000000000003full
#define IODA_TVT1_PE_NUMBER_SHIFT 0
#define IODA_TVT_COUNT 64
/* UTL Core registers */
#define PCIE_UTL_SYS_BUS_CONTROL 0x400
#define PCIE_UTL_STATUS 0x408
#define PCIE_UTL_SYS_BUS_AGENT_STATUS 0x410
#define PCIE_UTL_SYS_BUS_AGENT_ERR_SEV 0x418
#define PCIE_UTL_SYS_BUS_AGENT_IRQ_EN 0x420
#define PCIE_UTL_SYS_BUS_BURST_SZ_CONF 0x440
#define PCIE_UTL_REVISION_ID 0x448
#define PCIE_UTL_OUT_POST_HDR_BUF_ALLOC 0x4c0
#define PCIE_UTL_OUT_POST_DAT_BUF_ALLOC 0x4d0
#define PCIE_UTL_IN_POST_HDR_BUF_ALLOC 0x4e0
#define PCIE_UTL_IN_POST_DAT_BUF_ALLOC 0x4f0
#define PCIE_UTL_OUT_NP_BUF_ALLOC 0x500
#define PCIE_UTL_IN_NP_BUF_ALLOC 0x510
#define PCIE_UTL_PCIE_TAGS_ALLOC 0x520
#define PCIE_UTL_GBIF_READ_TAGS_ALLOC 0x530
#define PCIE_UTL_PCIE_PORT_CONTROL 0x540
#define PCIE_UTL_PCIE_PORT_STATUS 0x548
#define PCIE_UTL_PCIE_PORT_ERROR_SEV 0x550
#define PCIE_UTL_PCIE_PORT_IRQ_EN 0x558
#define PCIE_UTL_RC_STATUS 0x560
#define PCIE_UTL_RC_ERR_SEVERITY 0x568
#define PCIE_UTL_RC_IRQ_EN 0x570
#define PCIE_UTL_EP_STATUS 0x578
#define PCIE_UTL_EP_ERR_SEVERITY 0x580
#define PCIE_UTL_EP_ERR_IRQ_EN 0x588
#define PCIE_UTL_PCI_PM_CTRL1 0x590
#define PCIE_UTL_PCI_PM_CTRL2 0x598
/* PCIe stack registers */
#define PCIE_REG_SYSTEM_CONFIG1 0x600
#define PCIE_REG_SYSTEM_CONFIG2 0x608
#define PCIE_REG_EP_SYSTEM_CONFIG 0x618
#define PCIE_REG_EP_FLR 0x620
#define PCIE_REG_EP_BAR_CONFIG 0x628
#define PCIE_REG_LINK_CONFIG 0x630
#define PCIE_REG_PM_CONFIG 0x640
#define PCIE_REG_DLP_CONTROL 0x650
#define PCIE_REG_DLP_STATUS 0x658
#define PCIE_REG_ERR_REPORT_CONTROL 0x660
#define PCIE_REG_SLOT_CONTROL1 0x670
#define PCIE_REG_SLOT_CONTROL2 0x678
#define PCIE_REG_UTL_CONFIG 0x680
#define PCIE_REG_BUFFERS_CONFIG 0x690
#define PCIE_REG_ERROR_INJECT 0x698
#define PCIE_REG_SRIOV_CONFIG 0x6a0
#define PCIE_REG_PF0_SRIOV_STATUS 0x6a8
#define PCIE_REG_PF1_SRIOV_STATUS 0x6b0
#define PCIE_REG_PORT_NUMBER 0x700
#define PCIE_REG_POR_SYSTEM_CONFIG 0x708
/* PHB internal logic registers */
#define PCIE_REG_PHB_VERSION 0x800
#define PCIE_REG_RESET 0x808
#define PCIE_REG_PHB_CONTROL 0x810
#define PCIE_REG_PHB_TIMEOUT_CONTROL1 0x878
#define PCIE_REG_PHB_QUIESCE_DMA 0x888
#define PCIE_REG_PHB_DMA_READ_TAG_ACTV 0x900
#define PCIE_REG_PHB_TCE_READ_TAG_ACTV 0x908
/* FIR registers */
#define PCIE_REG_LEM_FIR_ACCUM 0xc00
#define PCIE_REG_LEM_FIR_AND_MASK 0xc08
#define PCIE_REG_LEM_FIR_OR_MASK 0xc10
#define PCIE_REG_LEM_ACTION0 0xc18
#define PCIE_REG_LEM_ACTION1 0xc20
#define PCIE_REG_LEM_ERROR_MASK 0xc30
#define PCIE_REG_LEM_ERROR_AND_MASK 0xc38
#define PCIE_REG_LEM_ERROR_OR_MASK 0xc40
/* PHB Error registers */
#define PCIE_REG_PHB_ERR_STATUS 0xc80
#define PCIE_REG_PHB_ERR1_STATUS 0xc88
#define PCIE_REG_PHB_ERR_INJECT 0xc90
#define PCIE_REG_PHB_ERR_LEM_ENABLE 0xc98
#define PCIE_REG_PHB_ERR_IRQ_ENABLE 0xca0
#define PCIE_REG_PHB_ERR_FREEZE_ENABLE 0xca8
#define PCIE_REG_PHB_ERR_SIDE_ENABLE 0xcb8
#define PCIE_REG_PHB_ERR_LOG_0 0xcc0
#define PCIE_REG_PHB_ERR_LOG_1 0xcc8
#define PCIE_REG_PHB_ERR_STATUS_MASK 0xcd0
#define PCIE_REG_PHB_ERR1_STATUS_MASK 0xcd8
#define PCIE_REG_MMIO_ERR_STATUS 0xd00
#define PCIE_REG_MMIO_ERR1_STATUS 0xd08
#define PCIE_REG_MMIO_ERR_INJECT 0xd10
#define PCIE_REG_MMIO_ERR_LEM_ENABLE 0xd18
#define PCIE_REG_MMIO_ERR_IRQ_ENABLE 0xd20
#define PCIE_REG_MMIO_ERR_FREEZE_ENABLE 0xd28
#define PCIE_REG_MMIO_ERR_SIDE_ENABLE 0xd38
#define PCIE_REG_MMIO_ERR_LOG_0 0xd40
#define PCIE_REG_MMIO_ERR_LOG_1 0xd48
#define PCIE_REG_MMIO_ERR_STATUS_MASK 0xd50
#define PCIE_REG_MMIO_ERR1_STATUS_MASK 0xd58
#define PCIE_REG_DMA_ERR_STATUS 0xd80
#define PCIE_REG_DMA_ERR1_STATUS 0xd88
#define PCIE_REG_DMA_ERR_INJECT 0xd90
#define PCIE_REG_DMA_ERR_LEM_ENABLE 0xd98
#define PCIE_REG_DMA_ERR_IRQ_ENABLE 0xda0
#define PCIE_REG_DMA_ERR_FREEZE_ENABLE 0xda8
#define PCIE_REG_DMA_ERR_SIDE_ENABLE 0xdb8
#define PCIE_REG_DMA_ERR_LOG_0 0xdc0
#define PCIE_REG_DMA_ERR_LOG_1 0xdc8
#define PCIE_REG_DMA_ERR_STATUS_MASK 0xdd0
#define PCIE_REG_DMA_ERR1_STATUS_MASK 0xdd8
/* Shortcuts for access to the above using the PHB definitions
* with an offset
*/
#define PCIE_REG_ERR_PHB_OFFSET 0x0
#define PCIE_REG_ERR_MMIO_OFFSET 0x80
#define PCIE_REG_ERR_DMA_OFFSET 0x100
/* Debug and Trace registers */
#define PCIE_REG_PHB_DEBUG_CONTROL0 0xe00
#define PCIE_REG_PHB_DEBUG_STATUS0 0xe08
#define PCIE_REG_PHB_DEBUG_CONTROL1 0xe10
#define PCIE_REG_PHB_DEBUG_STATUS1 0xe18
#define PCIE_REG_PHB_DEBUG_CONTROL2 0xe20
#define PCIE_REG_PHB_DEBUG_STATUS2 0xe28
#define PCIE_REG_PHB_DEBUG_CONTROL3 0xe30
#define PCIE_REG_PHB_DEBUG_STATUS3 0xe38
#define PCIE_REG_PHB_DEBUG_CONTROL4 0xe40
#define PCIE_REG_PHB_DEBUG_STATUS4 0xe48
#define PCIE_REG_PHB_DEBUG_CONTROL5 0xe50
#define PCIE_REG_PHB_DEBUG_STATUS5 0xe58
#define PCIE_REG_PHB_DEBUG_CONTROL6 0xe60
#define PCIE_REG_PHB_DEBUG_STATUS6 0xe68
/* Definition for PCIe errors */
struct wsp_pcie_err_log_data {
__u64 phb_err;
__u64 phb_err1;
__u64 phb_log0;
__u64 phb_log1;
__u64 mmio_err;
__u64 mmio_err1;
__u64 mmio_log0;
__u64 mmio_log1;
__u64 dma_err;
__u64 dma_err1;
__u64 dma_log0;
__u64 dma_log1;
__u64 utl_sys_err;
__u64 utl_port_err;
__u64 utl_rc_err;
__u64 unused;
};
#endif /* __WSP_PCI_H */
@@ -26,6 +26,7 @@
#include <asm/errno.h>
#include <asm/xics.h>
#include <asm/kvm_ppc.h>
+#include <asm/dbell.h>
struct icp_ipl {
union {
@@ -145,7 +146,13 @@ static unsigned int icp_native_get_irq(void)
static void icp_native_cause_ipi(int cpu, unsigned long data)
{
kvmppc_set_host_ipi(cpu, 1);
-	icp_native_set_qirr(cpu, IPI_PRIORITY);
+#ifdef CONFIG_PPC_DOORBELL
+	if (cpu_has_feature(CPU_FTR_DBELL) &&
+	    (cpumask_test_cpu(cpu, cpu_sibling_mask(smp_processor_id()))))
+		doorbell_cause_ipi(cpu, data);
+	else
+#endif
+		icp_native_set_qirr(cpu, IPI_PRIORITY);
}
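msgsndp doorbells are core-scoped, so the fast path above is only legal when the target CPU is a thread sibling of the sender; every other target still goes through the XICS qirr. The same guard, factored into a hypothetical helper for clarity (not part of the patch):

	/* Hypothetical helper: true when 'cpu' is reachable via a core-local
	 * doorbell from the current CPU (msgsndp only reaches threads of the
	 * sender's own core). */
	static inline bool ipi_can_use_doorbell(int cpu)
	{
		return cpu_has_feature(CPU_FTR_DBELL) &&
		       cpumask_test_cpu(cpu, cpu_sibling_mask(smp_processor_id()));
	}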
void xics_wake_cpu(int cpu)
@@ -122,7 +122,7 @@ void xmon_printf(const char *format, ...)
if (n && rc == 0) {
/* No udbg hooks, fallback to printk() - dangerous */
-		printk(xmon_outbuf);
+		printk("%s", xmon_outbuf);
}
}
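The one-character-class diff closes a classic format-string hole: xmon_outbuf holds already-formatted text that may itself contain '%' sequences, and passing it as printk's format string would have them interpreted. A minimal illustration of the hazard (hypothetical, not from the patch):

	char buf[16];
	snprintf(buf, sizeof(buf), "%d%% done", 100);	/* buf = "100% done" */
	printk(buf);		/* bad: the '%' in buf is parsed as a conversion */
	printk("%s", buf);	/* good: buf is treated purely as data */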
@@ -73,12 +73,10 @@ static int fastsleep_loop(struct cpuidle_device *dev,
return index;
new_lpcr = old_lpcr;
-	new_lpcr &= ~(LPCR_MER | LPCR_PECE); /* lpcr[mer] must be 0 */
-	/* exit powersave upon external interrupt, but not decrementer
-	 * interrupt.
+	/* Do not exit powersave upon decrementer as we've set up the timer
+	 * offload.
 	 */
-	new_lpcr |= LPCR_PECE0;
+	new_lpcr &= ~LPCR_PECE1;
mtspr(SPRN_LPCR, new_lpcr);
power7_sleep();
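In LPCR, PECE0 enables wakeup on external interrupts and PECE1 on the decrementer. The old code forced a specific wakeup set (external only); the new code leaves the other wakeup causes alone and masks only the decrementer, whose ticks are offloaded to a timer before fast sleep is entered. The resulting entry sequence, condensed (a sketch assuming the LPCR_* masks from the kernel headers):

	new_lpcr = old_lpcr;
	new_lpcr &= ~LPCR_PECE1;	/* decrementer must not wake us from fast sleep */
	mtspr(SPRN_LPCR, new_lpcr);
	power7_sleep();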
@@ -313,7 +313,7 @@ config CRYPTO_DEV_S5P
config CRYPTO_DEV_NX
bool "Support for IBM Power7+ in-Nest cryptographic acceleration"
-	depends on PPC64 && IBMVIO
+	depends on PPC64 && IBMVIO && !CPU_LITTLE_ENDIAN
default n
help
Support for Power7+ in-Nest cryptographic acceleration.
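The added !CPU_LITTLE_ENDIAN term presumably reflects that the NX driver still carried big-endian assumptions at this point, so it is simply kept off little-endian builds rather than fixed.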
@@ -13,7 +13,7 @@ CFLAGS := -Wall -O2 -flto -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CUR
export CC CFLAGS
-TARGETS = pmu copyloops mm
+TARGETS = pmu copyloops mm tm
endif
@@ -30,12 +30,15 @@ int run_test(int (test_function)(void), char *name)
pid = fork();
if (pid == 0) {
+		setpgid(0, 0);
exit(test_function());
} else if (pid == -1) {
perror("fork");
return 1;
}
+	setpgid(pid, pid);
/* Wake us up in timeout seconds */
alarm(TIMEOUT);
terminated = false;
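This hunk and the next are one change: the child is moved into its own process group (setpgid() is called in both parent and child so the group exists no matter which side is scheduled first), and the kill() calls in the following hunk then pass a negative pid, which signals every member of that group, so helpers forked by a test cannot outlive it. A self-contained sketch of the idiom (hypothetical names, not the harness itself):

	#include <signal.h>
	#include <sys/wait.h>
	#include <unistd.h>

	/* Run fn in its own process group; after a timeout, kill the group. */
	static int run_in_group(void (*fn)(void), unsigned int timeout)
	{
		pid_t pid = fork();
		if (pid == 0) {
			setpgid(0, 0);		/* child: group id == own pid */
			fn();
			_exit(0);
		}
		if (pid == -1)
			return 1;
		setpgid(pid, pid);		/* parent too, closing the race */
		sleep(timeout);			/* stand-in for alarm()/SIGALRM */
		kill(-pid, SIGTERM);		/* -pgid => the entire group */
		return waitpid(pid, NULL, 0) == pid ? 0 : 1;
	}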
@@ -50,17 +53,20 @@ int run_test(int (test_function)(void), char *name)
if (terminated) {
printf("!! force killing %s\n", name);
-		kill(pid, SIGKILL);
+		kill(-pid, SIGKILL);
return 1;
} else {
printf("!! killing %s\n", name);
-		kill(pid, SIGTERM);
+		kill(-pid, SIGTERM);
terminated = true;
alarm(KILL_TIMEOUT);
goto wait;
}
}
+	/* Kill anything else in the process group that is still running */
+	kill(-pid, SIGTERM);
if (WIFEXITED(status))
status = WEXITSTATUS(status);
else {
@@ -99,7 +105,10 @@ int test_harness(int (test_function)(void), char *name)
rc = run_test(test_function, name);
-	test_finish(name, rc);
+	if (rc == MAGIC_SKIP_RETURN_VALUE)
+		test_skip(name);
+	else
+		test_finish(name, rc);
return rc;
}
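This gives tests a third outcome besides pass/fail: a test returns a magic value and is reported as skipped rather than failed. The SKIP_IF() used by the EBB tests below is presumably wired up along these lines (assumed definitions; the real ones live in the harness/utils headers):

	#include <stdio.h>

	#define MAGIC_SKIP_RETURN_VALUE	99	/* assumed value */

	#define SKIP_IF(x)						\
		do {							\
			if ((x)) {					\
				fprintf(stderr, "skip: " #x "\n");	\
				return MAGIC_SKIP_RETURN_VALUE;		\
			}						\
		} while (0)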
@@ -4,7 +4,7 @@ noarg:
PROGS := count_instructions
EXTRA_SOURCES := ../harness.c event.c
-all: $(PROGS)
+all: $(PROGS) sub_all
$(PROGS): $(EXTRA_SOURCES)
@@ -12,12 +12,30 @@ $(PROGS): $(EXTRA_SOURCES)
count_instructions: loop.S count_instructions.c $(EXTRA_SOURCES)
$(CC) $(CFLAGS) -m64 -o $@ $^
-run_tests: all
+run_tests: all sub_run_tests
@-for PROG in $(PROGS); do \
./$$PROG; \
done;
-clean:
+clean: sub_clean
rm -f $(PROGS) loop.o
-.PHONY: all run_tests clean
+SUB_TARGETS = ebb
+
+sub_all:
+	@for TARGET in $(SUB_TARGETS); do \
+		$(MAKE) -C $$TARGET all; \
+	done;
+
+sub_run_tests: all
+	@for TARGET in $(SUB_TARGETS); do \
+		$(MAKE) -C $$TARGET run_tests; \
+	done;
+
+sub_clean:
+	@for TARGET in $(SUB_TARGETS); do \
+		$(MAKE) -C $$TARGET clean; \
+	done;
+
+.PHONY: all run_tests clean sub_all sub_run_tests sub_clean
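# --- new file below; assumed path: tools/testing/selftests/powerpc/pmu/ebb/Makefile ---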
noarg:
$(MAKE) -C ../../
# The EBB handler is 64-bit code and everything links against it
CFLAGS += -m64
PROGS := reg_access_test event_attributes_test cycles_test \
cycles_with_freeze_test pmc56_overflow_test \
ebb_vs_cpu_event_test cpu_event_vs_ebb_test \
cpu_event_pinned_vs_ebb_test task_event_vs_ebb_test \
task_event_pinned_vs_ebb_test multi_ebb_procs_test \
multi_counter_test pmae_handling_test \
close_clears_pmcc_test instruction_count_test \
fork_cleanup_test ebb_on_child_test \
ebb_on_willing_child_test back_to_back_ebbs_test \
lost_exception_test no_handler_test
all: $(PROGS)
$(PROGS): ../../harness.c ../event.c ../lib.c ebb.c ebb_handler.S trace.c
instruction_count_test: ../loop.S
lost_exception_test: ../lib.c
run_tests: all
@-for PROG in $(PROGS); do \
./$$PROG; \
done;
clean:
rm -f $(PROGS)
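/* --- new file below; assumed path: tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c --- */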
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
* Licensed under GPLv2.
*/
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include "ebb.h"
#define NUMBER_OF_EBBS 50
/*
* Test that if we overflow the counter while in the EBB handler, we take
* another EBB on exiting from the handler.
*
* We do this by counting with a stupidly low sample period, causing us to
* overflow the PMU while we're still in the EBB handler, leading to another
* EBB.
*
* We get out of what would otherwise be an infinite loop by leaving the
* counter frozen once we've taken enough EBBs.
*/
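/* In other words (assumption drawn from the comment above): overflow ->
 * EBB -> handler re-arms the PMC with a tiny period -> the PMC overflows
 * again while still in the handler -> BESCR[PMEO] is set again -> a new
 * EBB is taken as soon as the handler returns.
 */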
static void ebb_callee(void)
{
uint64_t siar, val;
val = mfspr(SPRN_BESCR);
if (!(val & BESCR_PMEO)) {
ebb_state.stats.spurious++;
goto out;
}
ebb_state.stats.ebb_count++;
trace_log_counter(ebb_state.trace, ebb_state.stats.ebb_count);
/* Resets the PMC */
count_pmc(1, sample_period);
out:
if (ebb_state.stats.ebb_count == NUMBER_OF_EBBS)
/* Reset but leave counters frozen */
reset_ebb_with_clear_mask(MMCR0_PMAO);
else
/* Unfreezes */
reset_ebb();
/* Do some stuff to chew some cycles and pop the counter */
siar = mfspr(SPRN_SIAR);
trace_log_reg(ebb_state.trace, SPRN_SIAR, siar);
val = mfspr(SPRN_PMC1);
trace_log_reg(ebb_state.trace, SPRN_PMC1, val);
val = mfspr(SPRN_MMCR0);
trace_log_reg(ebb_state.trace, SPRN_MMCR0, val);
}
int back_to_back_ebbs(void)
{
struct event event;
event_init_named(&event, 0x1001e, "cycles");
event_leader_ebb_init(&event);
event.attr.exclude_kernel = 1;
event.attr.exclude_hv = 1;
event.attr.exclude_idle = 1;
FAIL_IF(event_open(&event));
setup_ebb_handler(ebb_callee);
FAIL_IF(ebb_event_enable(&event));
sample_period = 5;
ebb_freeze_pmcs();
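	/* Assumption about the helper: PMCs interrupt when bit 31 flips, so
	 * pmc_sample_period() presumably seeds the counter with
	 * 0x80000000 - sample_period. */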
mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
ebb_global_enable();
ebb_unfreeze_pmcs();
while (ebb_state.stats.ebb_count < NUMBER_OF_EBBS)
FAIL_IF(core_busy_loop());
ebb_global_disable();
ebb_freeze_pmcs();
count_pmc(1, sample_period);
dump_ebb_state();
event_close(&event);
FAIL_IF(ebb_state.stats.ebb_count != NUMBER_OF_EBBS);
return 0;
}
int main(void)
{
return test_harness(back_to_back_ebbs, "back_to_back_ebbs");
}
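/* --- new file below; assumed path: tools/testing/selftests/powerpc/pmu/ebb/close_clears_pmcc_test.c --- */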
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
* Licensed under GPLv2.
*/
#include <stdio.h>
#include <stdlib.h>
#include <setjmp.h>
#include <signal.h>
#include "ebb.h"
/*
* Test that closing the EBB event clears MMCR0_PMCC, preventing further access
* by userspace to the PMU hardware.
*/
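/* Background (an assumption, not in the original comment): MMCR0[PMCC] is
 * what gates problem-state access to the PMU SPRs, so once the event is
 * closed a userspace mtspr to a PMC should trap and raise SIGILL, which is
 * exactly what the test provokes below.
 */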
int close_clears_pmcc(void)
{
struct event event;
event_init_named(&event, 0x1001e, "cycles");
event_leader_ebb_init(&event);
FAIL_IF(event_open(&event));
ebb_enable_pmc_counting(1);
setup_ebb_handler(standard_ebb_callee);
ebb_global_enable();
FAIL_IF(ebb_event_enable(&event));
mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
while (ebb_state.stats.ebb_count < 1)
FAIL_IF(core_busy_loop());
ebb_global_disable();
event_close(&event);
FAIL_IF(ebb_state.stats.ebb_count == 0);
	/* The real test is here: do we take a SIGILL when writing PMU regs now
	 * that we have closed the event? We expect that we will. */
FAIL_IF(catch_sigill(write_pmc1));
/* We should still be able to read EBB regs though */
mfspr(SPRN_EBBHR);
mfspr(SPRN_EBBRR);
mfspr(SPRN_BESCR);
return 0;
}
int main(void)
{
return test_harness(close_clears_pmcc, "close_clears_pmcc");
}
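/* --- new file below; assumed path: tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c --- */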
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
* Licensed under GPLv2.
*/
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "ebb.h"
/*
* Tests a pinned cpu event vs an EBB - in that order. The pinned cpu event
* should remain and the EBB event should fail to enable.
*/
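/* The parent/child handshake below presumably works over the two pipes:
 * each sync_with_child() round trip lets the parent order events, so the
 * child only installs its EBB event once the pinned cpu event is already
 * on the PMU (an assumption about the ebb_child()/sync_with_child()
 * helpers in the shared EBB test code).
 */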
static int setup_cpu_event(struct event *event, int cpu)
{
event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL");
event->attr.pinned = 1;
event->attr.exclude_kernel = 1;
event->attr.exclude_hv = 1;
event->attr.exclude_idle = 1;
SKIP_IF(require_paranoia_below(1));
FAIL_IF(event_open_with_cpu(event, cpu));
FAIL_IF(event_enable(event));
return 0;
}
int cpu_event_pinned_vs_ebb(void)
{
union pipe read_pipe, write_pipe;
struct event event;
int cpu, rc;
pid_t pid;
cpu = pick_online_cpu();
FAIL_IF(cpu < 0);
FAIL_IF(bind_to_cpu(cpu));
FAIL_IF(pipe(read_pipe.fds) == -1);
FAIL_IF(pipe(write_pipe.fds) == -1);
pid = fork();
if (pid == 0) {
/* NB order of pipes looks reversed */
exit(ebb_child(write_pipe, read_pipe));
}
/* We setup the cpu event first */
rc = setup_cpu_event(&event, cpu);
if (rc) {
kill_child_and_wait(pid);
return rc;
}
/* Signal the child to install its EBB event and wait */
if (sync_with_child(read_pipe, write_pipe))
/* If it fails, wait for it to exit */
goto wait;
/* Signal the child to run */
FAIL_IF(sync_with_child(read_pipe, write_pipe));
wait:
/* We expect it to fail to read the event */
FAIL_IF(wait_for_child(pid) != 2);
FAIL_IF(event_disable(&event));
FAIL_IF(event_read(&event));
event_report(&event);
/* The cpu event should have run */
FAIL_IF(event.result.value == 0);
FAIL_IF(event.result.enabled != event.result.running);
return 0;
}
int main(void)
{
return test_harness(cpu_event_pinned_vs_ebb, "cpu_event_pinned_vs_ebb");
}
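/* --- new file below; assumed path: tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c --- */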
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
* Licensed under GPLv2.
*/
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "ebb.h"
/*
* Tests a cpu event vs an EBB - in that order. The EBB should force the cpu
* event off the PMU.
*/
static int setup_cpu_event(struct event *event, int cpu)
{
event_init_named(event, 0x400FA, "PM_RUN_INST_CMPL");
event->attr.exclude_kernel = 1;
event->attr.exclude_hv = 1;
event->attr.exclude_idle = 1;
SKIP_IF(require_paranoia_below(1));
FAIL_IF(event_open_with_cpu(event, cpu));
FAIL_IF(event_enable(event));
return 0;
}
int cpu_event_vs_ebb(void)
{
union pipe read_pipe, write_pipe;
struct event event;
int cpu, rc;
pid_t pid;
cpu = pick_online_cpu();
FAIL_IF(cpu < 0);
FAIL_IF(bind_to_cpu(cpu));
FAIL_IF(pipe(read_pipe.fds) == -1);
FAIL_IF(pipe(write_pipe.fds) == -1);
pid = fork();
if (pid == 0) {
/* NB order of pipes looks reversed */
exit(ebb_child(write_pipe, read_pipe));
}
/* We setup the cpu event first */
rc = setup_cpu_event(&event, cpu);
if (rc) {
kill_child_and_wait(pid);
return rc;
}
/* Signal the child to install its EBB event and wait */
if (sync_with_child(read_pipe, write_pipe))
/* If it fails, wait for it to exit */
goto wait;
/* Signal the child to run */
FAIL_IF(sync_with_child(read_pipe, write_pipe));
wait:
/* We expect the child to succeed */
FAIL_IF(wait_for_child(pid));
FAIL_IF(event_disable(&event));
FAIL_IF(event_read(&event));
event_report(&event);
/* The cpu event may have run */
return 0;
}
int main(void)
{
return test_harness(cpu_event_vs_ebb, "cpu_event_vs_ebb");
}
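/* --- new file below; assumed path: tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c --- */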
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
* Licensed under GPLv2.
*/
#include <stdio.h>
#include <stdlib.h>
#include "ebb.h"
/*
* Basic test that counts user cycles and takes EBBs.
*/
int cycles(void)
{
struct event event;
event_init_named(&event, 0x1001e, "cycles");
event_leader_ebb_init(&event);
event.attr.exclude_kernel = 1;
event.attr.exclude_hv = 1;
event.attr.exclude_idle = 1;
FAIL_IF(event_open(&event));
ebb_enable_pmc_counting(1);
setup_ebb_handler(standard_ebb_callee);
ebb_global_enable();
FAIL_IF(ebb_event_enable(&event));
mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
while (ebb_state.stats.ebb_count < 10) {
FAIL_IF(core_busy_loop());
FAIL_IF(ebb_check_mmcr0());
}
ebb_global_disable();
ebb_freeze_pmcs();
count_pmc(1, sample_period);
dump_ebb_state();
event_close(&event);
FAIL_IF(ebb_state.stats.ebb_count == 0);
FAIL_IF(!ebb_check_count(1, sample_period, 100));
return 0;
}
int main(void)
{
return test_harness(cycles, "cycles");
}
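/* --- new file below; assumed path: tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c --- */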
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
* Licensed under GPLv2.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include "ebb.h"
/*
* Test of counting cycles while using MMCR0_FC (freeze counters) to only count
* parts of the code. This is complicated by the fact that FC is set by the
* hardware when the event overflows. We may take the EBB after we have set FC,
* so we have to be careful about whether we clear FC at the end of the EBB
* handler or not.
*/
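/* Concretely: the handler clears FC by default (it is in the clear mask),
 * since in that case FC was set by the overflow and counting must resume;
 * but if the main loop froze the counters on purpose, FC is dropped from
 * the mask so the handler leaves the freeze in place.
 */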
static bool counters_frozen = false;
static int ebbs_while_frozen = 0;
static void ebb_callee(void)
{
uint64_t mask, val;
mask = MMCR0_PMAO | MMCR0_FC;
val = mfspr(SPRN_BESCR);
if (!(val & BESCR_PMEO)) {
ebb_state.stats.spurious++;
goto out;
}
ebb_state.stats.ebb_count++;
trace_log_counter(ebb_state.trace, ebb_state.stats.ebb_count);
val = mfspr(SPRN_MMCR0);
trace_log_reg(ebb_state.trace, SPRN_MMCR0, val);
if (counters_frozen) {
trace_log_string(ebb_state.trace, "frozen");
ebbs_while_frozen++;
mask &= ~MMCR0_FC;
}
count_pmc(1, sample_period);
out:
reset_ebb_with_clear_mask(mask);
}
int cycles_with_freeze(void)
{
struct event event;
uint64_t val;
bool fc_cleared;
event_init_named(&event, 0x1001e, "cycles");
event_leader_ebb_init(&event);
event.attr.exclude_kernel = 1;
event.attr.exclude_hv = 1;
event.attr.exclude_idle = 1;
FAIL_IF(event_open(&event));
setup_ebb_handler(ebb_callee);
ebb_global_enable();
FAIL_IF(ebb_event_enable(&event));
mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
fc_cleared = false;
/* Make sure we loop until we take at least one EBB */
while ((ebb_state.stats.ebb_count < 20 && !fc_cleared) ||
ebb_state.stats.ebb_count < 1)
{
counters_frozen = false;
mb();
mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_FC);
FAIL_IF(core_busy_loop());
counters_frozen = true;
mb();
mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) | MMCR0_FC);
val = mfspr(SPRN_MMCR0);
		if (!(val & MMCR0_FC)) {
printf("Outside of loop, FC NOT set MMCR0 0x%lx\n", val);
fc_cleared = true;
}
}
ebb_global_disable();
ebb_freeze_pmcs();
count_pmc(1, sample_period);
dump_ebb_state();
printf("EBBs while frozen %d\n", ebbs_while_frozen);
event_close(&event);
FAIL_IF(ebb_state.stats.ebb_count == 0);
FAIL_IF(fc_cleared);
return 0;
}
int main(void)
{
return test_harness(cycles_with_freeze, "cycles_with_freeze");
}
[10 further file diffs collapsed by the viewer and not shown]