Commit b69f9e17 authored by Linus Torvalds

Merge tag 'powerpc-4.20-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "Some things that I missed due to travel, or that came in late.

  Two fixes also going to stable:

   - A revert of a buggy change to the 8xx TLB miss handlers.

   - Our flushing of SPE (Signal Processing Engine) registers on fork
     was broken.

  Other changes:

   - A change to the KVM decrementer emulation to use proper APIs.

   - Some cleanups to the way we do code patching in the 8xx code.

   - Expose the maximum possible memory for the system in
     /proc/powerpc/lparcfg.

   - Merge some updates from Scott: "a couple device tree updates, and a
     fix for a missing prototype warning"

  A few other minor fixes and a handful of fixes for our selftests.

  Thanks to: Aravinda Prasad, Breno Leitao, Camelia Groza, Christophe
  Leroy, Felipe Rechia, Joel Stanley, Naveen N. Rao, Paul Mackerras,
  Scott Wood, Tyrel Datwyler"

* tag 'powerpc-4.20-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (21 commits)
  selftests/powerpc: Fix compilation issue due to asm label
  selftests/powerpc/cache_shape: Fix out-of-tree build
  selftests/powerpc/switch_endian: Fix out-of-tree build
  selftests/powerpc/pmu: Link ebb tests with -no-pie
  selftests/powerpc/signal: Fix out-of-tree build
  selftests/powerpc/ptrace: Fix out-of-tree build
  powerpc/xmon: Relax frame size for clang
  selftests: powerpc: Fix warning for security subdir
  selftests/powerpc: Relax L1d miss targets for rfi_flush test
  powerpc/process: Fix flush_all_to_thread for SPE
  powerpc/pseries: add missing cpumask.h include file
  selftests/powerpc: Fix ptrace tm failure
  KVM: PPC: Use exported tb_to_ns() function in decrementer emulation
  powerpc/pseries: Export maximum memory value
  powerpc/8xx: Use patch_site for perf counters setup
  powerpc/8xx: Use patch_site for memory setup patching
  powerpc/code-patching: Add a helper to get the address of a patch_site
  Revert "powerpc/8xx: Use L1 entry APG to handle _PAGE_ACCESSED for CONFIG_SWAP"
  powerpc/8xx: add missing header in 8xx_mmu.c
  powerpc/8xx: Add DT node for using the SEC engine of the MPC885
  ...
@@ -77,12 +77,12 @@
 	};
 	ethernet@f0000 {
-		phy-handle = <&xg_cs4315_phy1>;
+		phy-handle = <&xg_cs4315_phy2>;
 		phy-connection-type = "xgmii";
 	};
 	ethernet@f2000 {
-		phy-handle = <&xg_cs4315_phy2>;
+		phy-handle = <&xg_cs4315_phy1>;
 		phy-connection-type = "xgmii";
 	};
...
@@ -72,7 +72,7 @@
 		#address-cells = <1>;
 		#size-cells = <1>;
 		device_type = "soc";
-		ranges = <0x0 0xff000000 0x4000>;
+		ranges = <0x0 0xff000000 0x28000>;
 		bus-frequency = <0>;

 		// Temporary -- will go away once kernel uses ranges for get_immrbase().
@@ -224,6 +224,17 @@
 				#size-cells = <0>;
 			};
 		};
+
+		crypto@20000 {
+			compatible = "fsl,sec1.2", "fsl,sec1.0";
+			reg = <0x20000 0x8000>;
+			interrupts = <1 1>;
+			interrupt-parent = <&PIC>;
+			fsl,num-channels = <1>;
+			fsl,channel-fifo-len = <24>;
+			fsl,exec-units-mask = <0x4c>;
+			fsl,descriptor-types-mask = <0x05000154>;
+		};
 	};

 	chosen {
...
@@ -36,6 +36,11 @@ int raw_patch_instruction(unsigned int *addr, unsigned int instr);
 int patch_instruction_site(s32 *addr, unsigned int instr);
 int patch_branch_site(s32 *site, unsigned long target, int flags);

+static inline unsigned long patch_site_addr(s32 *site)
+{
+	return (unsigned long)site + *site;
+}
+
 int instr_is_relative_branch(unsigned int instr);
 int instr_is_relative_link_branch(unsigned int instr);
 int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
...
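A note on the patch_site mechanism used throughout this series: each site is an s32 that records the offset from the site itself to the instruction to be patched, so patch_site_addr() recovers the instruction's address by adding the stored offset back to the site's own address. Below is a minimal userspace sketch of that arithmetic; it is an illustration only (not the kernel's patch_site asm macro), and the instruction word and variable names are invented for the example.

#include <stdint.h>
#include <stdio.h>

/* Same computation as the kernel helper above: the site holds
 * "target address - site address" as a signed 32-bit offset. */
static inline unsigned long patch_site_addr(int32_t *site)
{
	return (unsigned long)site + *site;
}

static uint32_t insn = 0x60000000;	/* stand-in instruction word (nop) */
static int32_t demo_site;		/* plays the role of a patch__xxx site */

int main(void)
{
	/* record the relative offset, as the patch_site macro would at build time */
	demo_site = (int32_t)((char *)&insn - (char *)&demo_site);

	printf("site resolves to %#lx, instruction lives at %p\n",
	       patch_site_addr(&demo_site), (void *)&insn);
	return 0;
}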
@@ -34,20 +34,12 @@
  * respectively NA for All or X for Supervisor and no access for User.
  * Then we use the APG to say whether accesses are according to Page rules or
  * "all Supervisor" rules (Access to all)
- * We also use the 2nd APG bit for _PAGE_ACCESSED when having SWAP:
- * When that bit is not set access is done iaw "all user"
- * which means no access iaw page rules.
- * Therefore, we define 4 APG groups. lsb is _PMD_USER, 2nd is _PAGE_ACCESSED
- * 0x => No access => 11 (all accesses performed as user iaw page definition)
- * 10 => No user => 01 (all accesses performed according to page definition)
- * 11 => User => 00 (all accesses performed as supervisor iaw page definition)
+ * Therefore, we define 2 APG groups. lsb is _PMD_USER
+ * 0 => No user => 01 (all accesses performed according to page definition)
+ * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
  * We define all 16 groups so that all other bits of APG can take any value
  */
-#ifdef CONFIG_SWAP
-#define MI_APG_INIT	0xf4f4f4f4
-#else
 #define MI_APG_INIT	0x44444444
-#endif

 /* The effective page number register. When read, contains the information
  * about the last instruction TLB miss. When MI_RPN is written, bits in
@@ -115,20 +107,12 @@
  * Supervisor and no access for user and NA for ALL.
  * Then we use the APG to say whether accesses are according to Page rules or
  * "all Supervisor" rules (Access to all)
- * We also use the 2nd APG bit for _PAGE_ACCESSED when having SWAP:
- * When that bit is not set access is done iaw "all user"
- * which means no access iaw page rules.
- * Therefore, we define 4 APG groups. lsb is _PMD_USER, 2nd is _PAGE_ACCESSED
- * 0x => No access => 11 (all accesses performed as user iaw page definition)
- * 10 => No user => 01 (all accesses performed according to page definition)
- * 11 => User => 00 (all accesses performed as supervisor iaw page definition)
+ * Therefore, we define 2 APG groups. lsb is _PMD_USER
+ * 0 => No user => 01 (all accesses performed according to page definition)
+ * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
  * We define all 16 groups so that all other bits of APG can take any value
  */
-#ifdef CONFIG_SWAP
-#define MD_APG_INIT	0xf4f4f4f4
-#else
 #define MD_APG_INIT	0x44444444
-#endif

 /* The effective page number register. When read, contains the information
  * about the last instruction TLB miss. When MD_RPN is written, bits in
@@ -180,12 +164,6 @@
  */
 #define SPRN_M_TW	799

-/* APGs */
-#define M_APG0		0x00000000
-#define M_APG1		0x00000020
-#define M_APG2		0x00000040
-#define M_APG3		0x00000060
-
 #ifdef CONFIG_PPC_MM_SLICES
 #include <asm/nohash/32/slice.h>
 #define SLICE_ARRAY_SIZE	(1 << (32 - SLICE_LOW_SHIFT - 1))
@@ -251,6 +229,15 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
 	BUG();
 }

+/* patch sites */
+extern s32 patch__itlbmiss_linmem_top;
+extern s32 patch__dtlbmiss_linmem_top, patch__dtlbmiss_immr_jmp;
+extern s32 patch__fixupdar_linmem_top;
+extern s32 patch__itlbmiss_exit_1, patch__itlbmiss_exit_2;
+extern s32 patch__dtlbmiss_exit_1, patch__dtlbmiss_exit_2, patch__dtlbmiss_exit_3;
+extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf;
+
 #endif /* !__ASSEMBLY__ */

 #if defined(CONFIG_PPC_4K_PAGES)
...
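For reference, the restored MI_APG_INIT/MD_APG_INIT value encodes exactly the two-group scheme the comment describes: 0x44444444 is the 2-bit pattern 01 00 repeated for all 16 access-protection groups, so any group whose low bit (_PMD_USER) is 0 gets 01 (accesses follow the page definition) and any group whose low bit is 1 gets 00 (supervisor access per the page definition), regardless of the remaining APG bits.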
@@ -5,6 +5,7 @@
 #include <linux/spinlock.h>
 #include <asm/page.h>
 #include <linux/time.h>
+#include <linux/cpumask.h>

 /*
  * Definitions for talking to the RTAS on CHRP machines.
...
@@ -31,6 +31,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
 #include <asm/export.h>
+#include <asm/code-patching-asm.h>

 #if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000
 /* By simply checking Address >= 0x80000000, we know if its a kernel address */
@@ -318,8 +319,8 @@ InstructionTLBMiss:
 	cmpli	cr0, r11, PAGE_OFFSET@h
 #ifndef CONFIG_PIN_TLB_TEXT
 	/* It is assumed that kernel code fits into the first 8M page */
-_ENTRY(ITLBMiss_cmp)
-	cmpli	cr7, r11, (PAGE_OFFSET + 0x0800000)@h
+0:	cmpli	cr7, r11, (PAGE_OFFSET + 0x0800000)@h
+	patch_site	0b, patch__itlbmiss_linmem_top
 #endif
 #endif
 #endif
@@ -353,13 +354,14 @@ _ENTRY(ITLBMiss_cmp)
 #if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
 	mtcr	r12
 #endif
-#ifdef CONFIG_SWAP
-	rlwinm	r11, r10, 31, _PAGE_ACCESSED >> 1
-#endif
 	/* Load the MI_TWC with the attributes for this "segment." */
 	mtspr	SPRN_MI_TWC, r11	/* Set segment attributes */

+#ifdef CONFIG_SWAP
+	rlwinm	r11, r10, 32-5, _PAGE_PRESENT
+	and	r11, r11, r10
+	rlwimi	r10, r11, 0, _PAGE_PRESENT
+#endif
 	li	r11, RPN_PATTERN | 0x200
 	/* The Linux PTE won't go exactly into the MMU TLB.
 	 * Software indicator bits 20 and 23 must be clear.
@@ -372,16 +374,17 @@ _ENTRY(ITLBMiss_cmp)
 	mtspr	SPRN_MI_RPN, r10	/* Update TLB entry */

 	/* Restore registers */
-_ENTRY(itlb_miss_exit_1)
-	mfspr	r10, SPRN_SPRG_SCRATCH0
+0:	mfspr	r10, SPRN_SPRG_SCRATCH0
 	mfspr	r11, SPRN_SPRG_SCRATCH1
 #if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
 	mfspr	r12, SPRN_SPRG_SCRATCH2
 #endif
 	rfi
+	patch_site	0b, patch__itlbmiss_exit_1

 #ifdef CONFIG_PERF_EVENTS
-_ENTRY(itlb_miss_perf)
-	lis	r10, (itlb_miss_counter - PAGE_OFFSET)@ha
+	patch_site	0f, patch__itlbmiss_perf
+0:	lis	r10, (itlb_miss_counter - PAGE_OFFSET)@ha
 	lwz	r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10)
 	addi	r11, r11, 1
 	stw	r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10)
@@ -435,11 +438,11 @@ DataStoreTLBMiss:
 #ifndef CONFIG_PIN_TLB_IMMR
 	cmpli	cr0, r11, VIRT_IMMR_BASE@h
 #endif
-_ENTRY(DTLBMiss_cmp)
-	cmpli	cr7, r11, (PAGE_OFFSET + 0x1800000)@h
+0:	cmpli	cr7, r11, (PAGE_OFFSET + 0x1800000)@h
+	patch_site	0b, patch__dtlbmiss_linmem_top
 #ifndef CONFIG_PIN_TLB_IMMR
-_ENTRY(DTLBMiss_jmp)
-	beq-	DTLBMissIMMR
+0:	beq-	DTLBMissIMMR
+	patch_site	0b, patch__dtlbmiss_immr_jmp
 #endif
 	blt	cr7, DTLBMissLinear
 	lis	r11, (swapper_pg_dir-PAGE_OFFSET)@ha
@@ -470,14 +473,22 @@ _ENTRY(DTLBMiss_jmp)
 	 * above.
 	 */
 	rlwimi	r11, r10, 0, _PAGE_GUARDED
-#ifdef CONFIG_SWAP
-	/* _PAGE_ACCESSED has to be set. We use second APG bit for that, 0
-	 * on that bit will represent a Non Access group
-	 */
-	rlwinm	r11, r10, 31, _PAGE_ACCESSED >> 1
-#endif
 	mtspr	SPRN_MD_TWC, r11

+	/* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set.
+	 * We also need to know if the insn is a load/store, so:
+	 * Clear _PAGE_PRESENT and load that which will
+	 * trap into DTLB Error with store bit set accordinly.
+	 */
+	/* PRESENT=0x1, ACCESSED=0x20
+	 * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
+	 * r10 = (r10 & ~PRESENT) | r11;
+	 */
+#ifdef CONFIG_SWAP
+	rlwinm	r11, r10, 32-5, _PAGE_PRESENT
+	and	r11, r11, r10
+	rlwimi	r10, r11, 0, _PAGE_PRESENT
+#endif
 	/* The Linux PTE won't go exactly into the MMU TLB.
 	 * Software indicator bits 24, 25, 26, and 27 must be
 	 * set. All other Linux PTE bits control the behavior
@@ -489,14 +500,16 @@ _ENTRY(DTLBMiss_jmp)

 	/* Restore registers */
 	mtspr	SPRN_DAR, r11	/* Tag DAR */
-_ENTRY(dtlb_miss_exit_1)
-	mfspr	r10, SPRN_SPRG_SCRATCH0
+
+0:	mfspr	r10, SPRN_SPRG_SCRATCH0
 	mfspr	r11, SPRN_SPRG_SCRATCH1
 	mfspr	r12, SPRN_SPRG_SCRATCH2
 	rfi
+	patch_site	0b, patch__dtlbmiss_exit_1

 #ifdef CONFIG_PERF_EVENTS
-_ENTRY(dtlb_miss_perf)
-	lis	r10, (dtlb_miss_counter - PAGE_OFFSET)@ha
+	patch_site	0f, patch__dtlbmiss_perf
+0:	lis	r10, (dtlb_miss_counter - PAGE_OFFSET)@ha
 	lwz	r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10)
 	addi	r11, r11, 1
 	stw	r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10)
@@ -637,8 +650,8 @@ InstructionBreakpoint:
  */
 DTLBMissIMMR:
 	mtcr	r12
-	/* Set 512k byte guarded page and mark it valid and accessed */
-	li	r10, MD_PS512K | MD_GUARDED | MD_SVALID | M_APG2
+	/* Set 512k byte guarded page and mark it valid */
+	li	r10, MD_PS512K | MD_GUARDED | MD_SVALID
 	mtspr	SPRN_MD_TWC, r10
 	mfspr	r10, SPRN_IMMR			/* Get current IMMR */
 	rlwinm	r10, r10, 0, 0xfff80000		/* Get 512 kbytes boundary */
@@ -648,16 +661,17 @@ DTLBMissIMMR:
 	li	r11, RPN_PATTERN
 	mtspr	SPRN_DAR, r11	/* Tag DAR */
-_ENTRY(dtlb_miss_exit_2)
-	mfspr	r10, SPRN_SPRG_SCRATCH0
+
+0:	mfspr	r10, SPRN_SPRG_SCRATCH0
 	mfspr	r11, SPRN_SPRG_SCRATCH1
 	mfspr	r12, SPRN_SPRG_SCRATCH2
 	rfi
+	patch_site	0b, patch__dtlbmiss_exit_2

 DTLBMissLinear:
 	mtcr	r12
-	/* Set 8M byte page and mark it valid and accessed */
-	li	r11, MD_PS8MEG | MD_SVALID | M_APG2
+	/* Set 8M byte page and mark it valid */
+	li	r11, MD_PS8MEG | MD_SVALID
 	mtspr	SPRN_MD_TWC, r11
 	rlwinm	r10, r10, 0, 0x0f800000	/* 8xx supports max 256Mb RAM */
 	ori	r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
@@ -666,28 +680,29 @@ DTLBMissLinear:
 	li	r11, RPN_PATTERN
 	mtspr	SPRN_DAR, r11	/* Tag DAR */
-_ENTRY(dtlb_miss_exit_3)
-	mfspr	r10, SPRN_SPRG_SCRATCH0
+
+0:	mfspr	r10, SPRN_SPRG_SCRATCH0
 	mfspr	r11, SPRN_SPRG_SCRATCH1
 	mfspr	r12, SPRN_SPRG_SCRATCH2
 	rfi
+	patch_site	0b, patch__dtlbmiss_exit_3

 #ifndef CONFIG_PIN_TLB_TEXT
 ITLBMissLinear:
 	mtcr	r12
-	/* Set 8M byte page and mark it valid,accessed */
-	li	r11, MI_PS8MEG | MI_SVALID | M_APG2
+	/* Set 8M byte page and mark it valid */
+	li	r11, MI_PS8MEG | MI_SVALID
 	mtspr	SPRN_MI_TWC, r11
 	rlwinm	r10, r10, 0, 0x0f800000	/* 8xx supports max 256Mb RAM */
 	ori	r10, r10, 0xf0 | MI_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
 			  _PAGE_PRESENT
 	mtspr	SPRN_MI_RPN, r10	/* Update TLB entry */

-_ENTRY(itlb_miss_exit_2)
-	mfspr	r10, SPRN_SPRG_SCRATCH0
+0:	mfspr	r10, SPRN_SPRG_SCRATCH0
 	mfspr	r11, SPRN_SPRG_SCRATCH1
 	mfspr	r12, SPRN_SPRG_SCRATCH2
 	rfi
+	patch_site	0b, patch__itlbmiss_exit_2
 #endif

 /* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions
@@ -705,8 +720,10 @@ FixupDAR:/* Entry point for dcbx workaround. */
 	mfspr	r11, SPRN_M_TW	/* Get level 1 table */
 	blt+	3f
 	rlwinm	r11, r10, 16, 0xfff8
-_ENTRY(FixupDAR_cmp)
-	cmpli	cr7, r11, (PAGE_OFFSET + 0x1800000)@h
+
+0:	cmpli	cr7, r11, (PAGE_OFFSET + 0x1800000)@h
+	patch_site	0b, patch__fixupdar_linmem_top
+
 	/* create physical page address from effective address */
 	tophys(r11, r10)
 	blt-	cr7, 201f
@@ -960,7 +977,7 @@ initial_mmu:
 	ori	r8, r8, MI_EVALID	/* Mark it valid */
 	mtspr	SPRN_MI_EPN, r8
 	li	r8, MI_PS8MEG /* Set 8M byte page */
-	ori	r8, r8, MI_SVALID | M_APG2	/* Make it valid, APG 2 */
+	ori	r8, r8, MI_SVALID	/* Make it valid */
 	mtspr	SPRN_MI_TWC, r8
 	li	r8, MI_BOOTINIT		/* Create RPN for address 0 */
 	mtspr	SPRN_MI_RPN, r8		/* Store TLB entry */
@@ -987,7 +1004,7 @@ initial_mmu:
 	ori	r8, r8, MD_EVALID	/* Mark it valid */
 	mtspr	SPRN_MD_EPN, r8
 	li	r8, MD_PS512K | MD_GUARDED	/* Set 512k byte page */
-	ori	r8, r8, MD_SVALID | M_APG2	/* Make it valid and accessed */
+	ori	r8, r8, MD_SVALID	/* Make it valid */
 	mtspr	SPRN_MD_TWC, r8
 	mr	r8, r9			/* Create paddr for TLB */
 	ori	r8, r8, MI_BOOTINIT|0x2	/* Inhibit cache -- Cort */
...
@@ -590,12 +590,11 @@ void flush_all_to_thread(struct task_struct *tsk)
 	if (tsk->thread.regs) {
 		preempt_disable();
 		BUG_ON(tsk != current);
-		save_all(tsk);
-
 #ifdef CONFIG_SPE
 		if (tsk->thread.regs->msr & MSR_SPE)
 			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
 #endif
+		save_all(tsk);

 		preempt_enable();
 	}
...
@@ -2337,8 +2337,7 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
 		kvmppc_core_prepare_to_enter(vcpu);
 		return;
 	}
-	dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
-		   / tb_ticks_per_sec;
+	dec_nsec = tb_to_ns(vcpu->arch.dec_expires - now);
 	hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL);
 	vcpu->arch.timer_running = 1;
 }
...
@@ -61,11 +61,10 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
 	dec_time = vcpu->arch.dec;
 	/*
-	 * Guest timebase ticks at the same frequency as host decrementer.
-	 * So use the host decrementer calculations for decrementer emulation.
+	 * Guest timebase ticks at the same frequency as host timebase.
+	 * So use the host timebase calculations for decrementer emulation.
 	 */
-	dec_time = dec_time << decrementer_clockevent.shift;
-	do_div(dec_time, decrementer_clockevent.mult);
+	dec_time = tb_to_ns(dec_time);
 	dec_nsec = do_div(dec_time, NSEC_PER_SEC);
 	hrtimer_start(&vcpu->arch.dec_timer,
 		      ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
...
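Both KVM hunks above replace an open-coded timebase-to-nanoseconds conversion with the exported tb_to_ns() helper. Below is a rough userspace sketch of the conversion they perform; the 512 MHz timebase frequency, the example tick count, and the overflow-naive arithmetic are assumptions for illustration (the kernel helper uses a mult/shift pair precomputed at boot).

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* example timebase frequency, not a value taken from the patch */
static unsigned long long tb_ticks_per_sec = 512000000ULL;

/* convert timebase ticks to nanoseconds, ignoring overflow for brevity */
static unsigned long long tb_to_ns_sketch(unsigned long long ticks)
{
	return ticks * NSEC_PER_SEC / tb_ticks_per_sec;
}

int main(void)
{
	unsigned long long dec_ticks = 1024000;	/* example decrementer delta */

	printf("%llu timebase ticks ~= %llu ns\n",
	       dec_ticks, tb_to_ns_sketch(dec_ticks));
	return 0;
}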
@@ -13,6 +13,7 @@
  */

 #include <linux/memblock.h>
+#include <linux/mmu_context.h>
 #include <asm/fixmap.h>
 #include <asm/code-patching.h>
@@ -79,7 +80,7 @@ void __init MMU_init_hw(void)
 	for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) {
 		mtspr(SPRN_MD_CTR, ctr | (i << 8));
 		mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID);
-		mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID | M_APG2);
+		mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID);
 		mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT);
 		addr += LARGE_PAGE_SIZE_8M;
 		mem -= LARGE_PAGE_SIZE_8M;
@@ -97,22 +98,13 @@ static void __init mmu_mapin_immr(void)
 		map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
 }

-/* Address of instructions to patch */
-#ifndef CONFIG_PIN_TLB_IMMR
-extern unsigned int DTLBMiss_jmp;
-#endif
-extern unsigned int DTLBMiss_cmp, FixupDAR_cmp;
-#ifndef CONFIG_PIN_TLB_TEXT
-extern unsigned int ITLBMiss_cmp;
-#endif
-
-static void __init mmu_patch_cmp_limit(unsigned int *addr, unsigned long mapped)
+static void __init mmu_patch_cmp_limit(s32 *site, unsigned long mapped)
 {
-	unsigned int instr = *addr;
+	unsigned int instr = *(unsigned int *)patch_site_addr(site);

 	instr &= 0xffff0000;
 	instr |= (unsigned long)__va(mapped) >> 16;

-	patch_instruction(addr, instr);
+	patch_instruction_site(site, instr);
 }

 unsigned long __init mmu_mapin_ram(unsigned long top)
@@ -123,17 +115,17 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
 		mapped = 0;
 		mmu_mapin_immr();
 #ifndef CONFIG_PIN_TLB_IMMR
-		patch_instruction(&DTLBMiss_jmp, PPC_INST_NOP);
+		patch_instruction_site(&patch__dtlbmiss_immr_jmp, PPC_INST_NOP);
 #endif
 #ifndef CONFIG_PIN_TLB_TEXT
-		mmu_patch_cmp_limit(&ITLBMiss_cmp, 0);
+		mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, 0);
 #endif
 	} else {
 		mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
 	}

-	mmu_patch_cmp_limit(&DTLBMiss_cmp, mapped);
-	mmu_patch_cmp_limit(&FixupDAR_cmp, mapped);
+	mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped);
+	mmu_patch_cmp_limit(&patch__fixupdar_linmem_top, mapped);

 	/* If the size of RAM is not an exact power of two, we may not
 	 * have covered RAM in its entirety with 8 MiB
...
@@ -31,9 +31,6 @@
 extern unsigned long itlb_miss_counter, dtlb_miss_counter;
 extern atomic_t instruction_counter;
-extern unsigned int itlb_miss_perf, dtlb_miss_perf;
-extern unsigned int itlb_miss_exit_1, itlb_miss_exit_2;
-extern unsigned int dtlb_miss_exit_1, dtlb_miss_exit_2, dtlb_miss_exit_3;

 static atomic_t insn_ctr_ref;
 static atomic_t itlb_miss_ref;
@@ -103,22 +100,22 @@ static int mpc8xx_pmu_add(struct perf_event *event, int flags)
 		break;
 	case PERF_8xx_ID_ITLB_LOAD_MISS:
 		if (atomic_inc_return(&itlb_miss_ref) == 1) {
-			unsigned long target = (unsigned long)&itlb_miss_perf;
+			unsigned long target = patch_site_addr(&patch__itlbmiss_perf);

-			patch_branch(&itlb_miss_exit_1, target, 0);
+			patch_branch_site(&patch__itlbmiss_exit_1, target, 0);
 #ifndef CONFIG_PIN_TLB_TEXT
-			patch_branch(&itlb_miss_exit_2, target, 0);
+			patch_branch_site(&patch__itlbmiss_exit_2, target, 0);
 #endif
 		}
 		val = itlb_miss_counter;
 		break;
 	case PERF_8xx_ID_DTLB_LOAD_MISS:
 		if (atomic_inc_return(&dtlb_miss_ref) == 1) {
-			unsigned long target = (unsigned long)&dtlb_miss_perf;
+			unsigned long target = patch_site_addr(&patch__dtlbmiss_perf);

-			patch_branch(&dtlb_miss_exit_1, target, 0);
-			patch_branch(&dtlb_miss_exit_2, target, 0);
-			patch_branch(&dtlb_miss_exit_3, target, 0);
+			patch_branch_site(&patch__dtlbmiss_exit_1, target, 0);
+			patch_branch_site(&patch__dtlbmiss_exit_2, target, 0);
+			patch_branch_site(&patch__dtlbmiss_exit_3, target, 0);
 		}
 		val = dtlb_miss_counter;
 		break;
@@ -180,17 +177,17 @@ static void mpc8xx_pmu_del(struct perf_event *event, int flags)
 		break;
 	case PERF_8xx_ID_ITLB_LOAD_MISS:
 		if (atomic_dec_return(&itlb_miss_ref) == 0) {
-			patch_instruction(&itlb_miss_exit_1, insn);
+			patch_instruction_site(&patch__itlbmiss_exit_1, insn);
 #ifndef CONFIG_PIN_TLB_TEXT
-			patch_instruction(&itlb_miss_exit_2, insn);
+			patch_instruction_site(&patch__itlbmiss_exit_2, insn);
 #endif
 		}
 		break;
 	case PERF_8xx_ID_DTLB_LOAD_MISS:
 		if (atomic_dec_return(&dtlb_miss_ref) == 0) {
-			patch_instruction(&dtlb_miss_exit_1, insn);
-			patch_instruction(&dtlb_miss_exit_2, insn);
-			patch_instruction(&dtlb_miss_exit_3, insn);
+			patch_instruction_site(&patch__dtlbmiss_exit_1, insn);
+			patch_instruction_site(&patch__dtlbmiss_exit_2, insn);
+			patch_instruction_site(&patch__dtlbmiss_exit_3, insn);
 		}
 		break;
 	}
...
@@ -26,6 +26,7 @@
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/hugetlb.h>
 #include <asm/lppaca.h>
 #include <asm/hvcall.h>
 #include <asm/firmware.h>
@@ -36,6 +37,7 @@
 #include <asm/vio.h>
 #include <asm/mmu.h>
 #include <asm/machdep.h>
+#include <asm/drmem.h>

 #include "pseries.h"
@@ -433,6 +435,16 @@ static void parse_em_data(struct seq_file *m)
 		seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
 }

+static void maxmem_data(struct seq_file *m)
+{
+	unsigned long maxmem = 0;
+
+	maxmem += drmem_info->n_lmbs * drmem_info->lmb_size;
+	maxmem += hugetlb_total_pages() * PAGE_SIZE;
+
+	seq_printf(m, "MaxMem=%ld\n", maxmem);
+}
+
 static int pseries_lparcfg_data(struct seq_file *m, void *v)
 {
 	int partition_potential_processors;
@@ -491,6 +503,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
 	seq_printf(m, "slb_size=%d\n", mmu_slb_size);
 #endif
 	parse_em_data(m);
+	maxmem_data(m);

 	return 0;
 }
...
@@ -11,6 +11,12 @@ UBSAN_SANITIZE := n
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))

+ifdef CONFIG_CC_IS_CLANG
+# clang stores addresses on the stack causing the frame size to blow
+# out. See https://github.com/ClangBuiltLinux/linux/issues/252
+KBUILD_CFLAGS += -Wframe-larger-than=4096
+endif
+
 ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)

 obj-y			+= xmon.o nonstdio.o spr_access.o
...
 # SPDX-License-Identifier: GPL-2.0
-TEST_PROGS := cache_shape
-
-all: $(TEST_PROGS)
-
-$(TEST_PROGS): ../harness.c ../utils.c
+TEST_GEN_PROGS := cache_shape

 top_srcdir = ../../../../..
 include ../../lib.mk

-clean:
-	rm -f $(TEST_PROGS) *.o
+$(TEST_GEN_PROGS): ../harness.c ../utils.c
@@ -5,6 +5,9 @@ noarg:
 # The EBB handler is 64-bit code and everything links against it
 CFLAGS += -m64

+# Toolchains may build PIE by default which breaks the assembly
+LDFLAGS += -no-pie
+
 TEST_GEN_PROGS := reg_access_test event_attributes_test cycles_test	\
 	 cycles_with_freeze_test pmc56_overflow_test		\
 	 ebb_vs_cpu_event_test cpu_event_vs_ebb_test		\
...
 # SPDX-License-Identifier: GPL-2.0
-TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
+TEST_GEN_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
               ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx \
               ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \
               perf-hwbreak ptrace-syscall
@@ -7,14 +7,9 @@ TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
 top_srcdir = ../../../../..
 include ../../lib.mk

-all: $(TEST_PROGS)
-
 CFLAGS += -m64 -I../../../../../usr/include -I../tm -mhtm -fno-pie

-ptrace-pkey core-pkey: child.h
-ptrace-pkey core-pkey: LDLIBS += -pthread
+$(OUTPUT)/ptrace-pkey $(OUTPUT)/core-pkey: child.h
+$(OUTPUT)/ptrace-pkey $(OUTPUT)/core-pkey: LDLIBS += -pthread

-$(TEST_PROGS): ../harness.c ../utils.c ../lib/reg.S ptrace.h
-
-clean:
-	rm -f $(TEST_PROGS) *.o
+$(TEST_GEN_PROGS): ../harness.c ../utils.c ../lib/reg.S ptrace.h
@@ -67,8 +67,8 @@ void tm_spd_gpr(void)
 		"3: ;"
 		: [res] "=r" (result), [texasr] "=r" (texasr)
 		: [gpr_1]"i"(GPR_1), [gpr_2]"i"(GPR_2), [gpr_4]"i"(GPR_4),
-		[sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "r" (&a),
-		[flt_2] "r" (&b), [flt_4] "r" (&d)
+		[sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "b" (&a),
+		[flt_4] "b" (&d)
 		: "memory", "r5", "r6", "r7",
 		"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
 		"r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
...
 # SPDX-License-Identifier: GPL-2.0+
 TEST_GEN_PROGS := rfi_flush
+top_srcdir = ../../../../..

 CFLAGS += -I../../../../../usr/include
...
@@ -49,6 +49,7 @@ int rfi_flush_test(void)
 	struct perf_event_read v;
 	__u64 l1d_misses_total = 0;
 	unsigned long iterations = 100000, zero_size = 24 * 1024;
+	unsigned long l1d_misses_expected;
 	int rfi_flush_org, rfi_flush;

 	SKIP_IF(geteuid() != 0);
@@ -71,6 +72,12 @@ int rfi_flush_test(void)

 	iter = repetitions;

+	/*
+	 * We expect to see l1d miss for each cacheline access when rfi_flush
+	 * is set. Allow a small variation on this.
+	 */
+	l1d_misses_expected = iterations * (zero_size / CACHELINE_SIZE - 2);
+
 again:
 	FAIL_IF(perf_event_reset(fd));
@@ -78,10 +85,9 @@ again:

 	FAIL_IF(read(fd, &v, sizeof(v)) != sizeof(v));

-	/* Expect at least zero_size/CACHELINE_SIZE misses per iteration */
-	if (v.l1d_misses >= (iterations * zero_size / CACHELINE_SIZE) && rfi_flush)
+	if (rfi_flush && v.l1d_misses >= l1d_misses_expected)
 		passes++;
-	else if (v.l1d_misses < iterations && !rfi_flush)
+	else if (!rfi_flush && v.l1d_misses < (l1d_misses_expected / 2))
 		passes++;

 	l1d_misses_total += v.l1d_misses;
@@ -92,13 +98,15 @@ again:
 	if (passes < repetitions) {
 		printf("FAIL (L1D misses with rfi_flush=%d: %llu %c %lu) [%d/%d failures]\n",
 		       rfi_flush, l1d_misses_total, rfi_flush ? '<' : '>',
-		       rfi_flush ? (repetitions * iterations * zero_size / CACHELINE_SIZE) : iterations,
+		       rfi_flush ? repetitions * l1d_misses_expected :
+		       repetitions * l1d_misses_expected / 2,
 		       repetitions - passes, repetitions);
 		rc = 1;
 	} else
 		printf("PASS (L1D misses with rfi_flush=%d: %llu %c %lu) [%d/%d pass]\n",
 		       rfi_flush, l1d_misses_total, rfi_flush ? '>' : '<',
-		       rfi_flush ? (repetitions * iterations * zero_size / CACHELINE_SIZE) : iterations,
+		       rfi_flush ? repetitions * l1d_misses_expected :
+		       repetitions * l1d_misses_expected / 2,
 		       passes, repetitions);

 	if (rfi_flush == rfi_flush_org) {
...
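To put the relaxed threshold in perspective (assuming the test's CACHELINE_SIZE is 128 bytes, which is defined outside this hunk): zero_size / CACHELINE_SIZE = 24576 / 128 = 192 cachelines, so l1d_misses_expected = 100000 * (192 - 2) = 19,000,000 misses per measurement. With rfi_flush enabled a pass now requires at least that many misses, and with it disabled a pass requires fewer than half of it, rather than the previous fixed bounds.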
 # SPDX-License-Identifier: GPL-2.0
-TEST_PROGS := signal signal_tm
-
-all: $(TEST_PROGS)
-
-$(TEST_PROGS): ../harness.c ../utils.c signal.S
+TEST_GEN_PROGS := signal signal_tm

 CFLAGS += -maltivec
-signal_tm: CFLAGS += -mhtm
+$(OUTPUT)/signal_tm: CFLAGS += -mhtm

 top_srcdir = ../../../../..
 include ../../lib.mk

-clean:
-	rm -f $(TEST_PROGS) *.o
+$(TEST_GEN_PROGS): ../harness.c ../utils.c signal.S
@@ -8,6 +8,7 @@ EXTRA_CLEAN = $(OUTPUT)/*.o $(OUTPUT)/check-reversed.S
 top_srcdir = ../../../../..
 include ../../lib.mk

+$(OUTPUT)/switch_endian_test: ASFLAGS += -I $(OUTPUT)
 $(OUTPUT)/switch_endian_test: $(OUTPUT)/check-reversed.S

 $(OUTPUT)/check-reversed.o: $(OUTPUT)/check.o
...
@@ -25,7 +25,6 @@
 #include "utils.h"

 static char auxv[4096];
-extern unsigned int dscr_insn[];

 int read_auxv(char *buf, ssize_t buf_size)
 {
@@ -247,7 +246,8 @@ static void sigill_handler(int signr, siginfo_t *info, void *unused)
 	ucontext_t *ctx = (ucontext_t *)unused;
 	unsigned long *pc = &UCONTEXT_NIA(ctx);

-	if (*pc == (unsigned long)&dscr_insn) {
+	/* mtspr 3,RS to check for move to DSCR below */
+	if ((*((unsigned int *)*pc) & 0xfc1fffff) == 0x7c0303a6) {
 		if (!warned++)
 			printf("WARNING: Skipping over dscr setup. Consider running 'ppc64_cpu --dscr=1' manually.\n");
 		*pc += 4;
@@ -271,5 +271,5 @@ void set_dscr(unsigned long val)
 		init = 1;
 	}

-	asm volatile("dscr_insn: mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR));
+	asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR));
 }