Commit aab6bf50 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "The diffstat is a bit spread out thanks to an invasive CPU erratum
  workaround which missed the merge window and also a bunch of fixes to
  the recently added MTE selftests.

   - Fixes to MTE kselftests

   - Fix return code from KVM Spectre-v2 hypercall

   - Build fixes for ld.lld and Clang's infamous integrated assembler

   - Ensure RCU is up and running before we use printk()

   - Workaround for Cortex-A77 erratum 1508412

   - Fix linker warnings from unexpected ELF sections

   - Ensure PE/COFF sections are 64k aligned"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: Change .weak to SYM_FUNC_START_WEAK_PI for arch/arm64/lib/mem*.S
  arm64/smp: Move rcu_cpu_starting() earlier
  arm64: Add workaround for Arm Cortex-A77 erratum 1508412
  arm64: Add part number for Arm Cortex-A77
  arm64: mte: Document that user PSTATE.TCO is ignored by kernel uaccess
  module: use hidden visibility for weak symbol references
  arm64: efi: increase EFI PE/COFF header padding to 64 KB
  arm64: vmlinux.lds: account for spurious empty .igot.plt sections
  kselftest/arm64: Fix check_user_mem test
  kselftest/arm64: Fix check_ksm_options test
  kselftest/arm64: Fix check_mmap_options test
  kselftest/arm64: Fix check_child_memory test
  kselftest/arm64: Fix check_tags_inclusion test
  kselftest/arm64: Fix check_buffer_fill test
  arm64: avoid -Woverride-init warning
  KVM: arm64: ARM_SMCCC_ARCH_WORKAROUND_1 doesn't return SMCCC_RET_NOT_REQUIRED
  arm64: vdso32: Allow ld.lld to properly link the VDSO
@@ -102,7 +102,9 @@ applications.
 system call) are not checked if the user thread tag checking mode is
 ``PR_MTE_TCF_NONE`` or ``PR_MTE_TCF_ASYNC``. If the tag checking mode is
 ``PR_MTE_TCF_SYNC``, the kernel makes a best effort to check its user
-address accesses, however it cannot always guarantee it.
+address accesses, however it cannot always guarantee it. Kernel accesses
+to user addresses are always performed with an effective ``PSTATE.TCO``
+value of zero, regardless of the user configuration.
 
 Excluding Tags in the ``IRG``, ``ADDG`` and ``SUBG`` instructions
 -----------------------------------------------------------------
......
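The tag-check modes named above are selected per thread via prctl(). A minimal userspace sketch, assuming a 5.10-era MTE-capable kernel; the constants carry their real linux/prctl.h values but are guarded so the sketch builds with older headers, and error handling is elided:

```c
#include <sys/prctl.h>

/* Values from linux/prctl.h (v5.10). */
#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL	55
#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
#endif
#ifndef PR_MTE_TCF_SYNC
#define PR_MTE_TCF_SYNC		(1UL << 1)
#endif

/* Opt the calling thread into synchronous MTE tag-check faults; with this
 * mode the kernel also best-effort checks its own user accesses, as the
 * documentation hunk above describes. */
int enable_sync_tag_checks(void)
{
	return prctl(PR_SET_TAGGED_ADDR_CTRL,
		     PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC, 0, 0, 0);
}
```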
@@ -90,6 +90,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A76      | #1463225        | ARM64_ERRATUM_1463225       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A77      | #1508412        | ARM64_ERRATUM_1508412       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1188873,1418040| ARM64_ERRATUM_1418040       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1349291        | N/A                         |
......
@@ -636,6 +636,26 @@ config ARM64_ERRATUM_1542419
 
 	  If unsure, say Y.
 
+config ARM64_ERRATUM_1508412
+	bool "Cortex-A77: 1508412: workaround deadlock on sequence of NC/Device load and store exclusive or PAR read"
+	default y
+	help
+	  This option adds a workaround for Arm Cortex-A77 erratum 1508412.
+
+	  Affected Cortex-A77 cores (r0p0, r1p0) could deadlock on a sequence
+	  of a store-exclusive or read of PAR_EL1 and a load with device or
+	  non-cacheable memory attributes. The workaround depends on a firmware
+	  counterpart.
+
+	  KVM guests must also have the workaround implemented or they can
+	  deadlock the system.
+
+	  Work around the issue by inserting DMB SY barriers around PAR_EL1
+	  register reads and warning KVM users. The DMB barrier is sufficient
+	  to prevent a speculative PAR_EL1 read.
+
+	  If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
 	bool "Cavium erratum 22375, 24313"
 	default y
......
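For readers unfamiliar with the shape of this workaround, here is a minimal standalone sketch of the barrier pattern the option enables. It is a hedged illustration only: the kernel's real implementation (the read_sysreg_par() hunk further down) uses runtime-patched ALTERNATIVE() sites, so unaffected CPUs execute plain NOPs instead of barriers.

```c
#include <stdint.h>

/* Sketch of the erratum 1508412 mitigation: bracket the PAR_EL1 read with
 * DMB SY so it cannot be reordered against, or speculated ahead of,
 * outstanding Device/Non-cacheable accesses. */
static inline uint64_t par_el1_read_erratum_1508412(void)
{
	uint64_t par;

	asm volatile("dmb sy" ::: "memory");	/* drain prior NC/Device accesses */
	asm volatile("mrs %0, par_el1" : "=r"(par));
	asm volatile("dmb sy" ::: "memory");	/* forbid a speculative re-read */
	return par;
}
```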
@@ -24,6 +24,7 @@
 #define CTR_L1IP(ctr)		(((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
 
 #define ICACHE_POLICY_VPIPT	0
+#define ICACHE_POLICY_RESERVED	1
 #define ICACHE_POLICY_VIPT	2
 #define ICACHE_POLICY_PIPT	3
......
@@ -65,7 +65,8 @@
 #define ARM64_HAS_ARMv8_4_TTL			55
 #define ARM64_HAS_TLB_RANGE			56
 #define ARM64_MTE				57
+#define ARM64_WORKAROUND_1508412		58
 
-#define ARM64_NCAPS				58
+#define ARM64_NCAPS				59
 
 #endif /* __ASM_CPUCAPS_H */
@@ -71,6 +71,7 @@
 #define ARM_CPU_PART_CORTEX_A55		0xD05
 #define ARM_CPU_PART_CORTEX_A76		0xD0B
 #define ARM_CPU_PART_NEOVERSE_N1	0xD0C
+#define ARM_CPU_PART_CORTEX_A77		0xD0D
 
 #define APM_CPU_PART_POTENZA		0x000
@@ -105,6 +106,7 @@
 #define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
 #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
 #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
+#define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
 #define MIDR_THUNDERX	MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
......
@@ -1007,6 +1007,7 @@
 
 #include <linux/build_bug.h>
 #include <linux/types.h>
+#include <asm/alternative.h>
 
 #define __DEFINE_MRS_MSR_S_REGNUM				\
 "	.irp	num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \
@@ -1095,6 +1096,14 @@
 		write_sysreg_s(__scs_new, sysreg);			\
 } while (0)
 
+#define read_sysreg_par() ({						\
+	u64 par;							\
+	asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));	\
+	par = read_sysreg(par_el1);					\
+	asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));	\
+	par;								\
+})
+
 #endif
 
 #endif /* __ASM_SYSREG_H */
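Callers simply swap read_sysreg(par_el1) for the new macro, as the later hunks in this merge do; the ALTERNATIVE() sites boot as NOPs and are patched to "dmb sy" only on CPUs that set the ARM64_WORKAROUND_1508412 capability. A hedged usage sketch (the caller function is hypothetical; SYS_PAR_EL1_F is the real F-bit mask from sysreg.h):

```c
/* Hypothetical caller: did the most recent AT translation succeed? */
static bool last_translation_succeeded(void)
{
	u64 par = read_sysreg_par();

	return !(par & SYS_PAR_EL1_F);	/* F bit set => translation aborted */
}
```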
@@ -522,6 +522,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.matches = has_neoverse_n1_erratum_1542419,
 		.cpu_enable = cpu_enable_trap_ctr_access,
 	},
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_1508412
+	{
+		/* we depend on the firmware portion for correctness */
+		.desc = "ARM erratum 1508412 (kernel portion)",
+		.capability = ARM64_WORKAROUND_1508412,
+		ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
+				  0, 0,
+				  1, 0),
+	},
+#endif
 	{
 	}
......
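The ERRATA_MIDR_RANGE() entry above matches Cortex-A77 r0p0 through r1p0 by comparing the variant and revision fields of MIDR_EL1. A hedged sketch of that comparison; the field positions follow the Arm ARM, the helper name is hypothetical, and the implementer/part-number match against MIDR_CORTEX_A77 is omitted for brevity:

```c
#define MIDR_VARIANT(midr)	(((midr) >> 20) & 0xf)	/* the "rN" of rNpM */
#define MIDR_REVISION(midr)	((midr) & 0xf)		/* the "pM" of rNpM */

/* Hypothetical helper: true for r0p0..r1p0, i.e. (variant, revision) <= (1, 0). */
static bool midr_in_range_r0p0_r1p0(u64 midr)
{
	unsigned int var = MIDR_VARIANT(midr);
	unsigned int rev = MIDR_REVISION(midr);

	return var < 1 || (var == 1 && rev == 0);
}
```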
@@ -34,10 +34,10 @@ DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
 static struct cpuinfo_arm64 boot_cpu_data;
 
 static const char *icache_policy_str[] = {
-	[0 ... ICACHE_POLICY_PIPT]	= "RESERVED/UNKNOWN",
-	[ICACHE_POLICY_VPIPT]		= "VPIPT",
+	[ICACHE_POLICY_RESERVED]	= "RESERVED/UNKNOWN",
 	[ICACHE_POLICY_VIPT]		= "VIPT",
 	[ICACHE_POLICY_PIPT]		= "PIPT",
+	[ICACHE_POLICY_VPIPT]		= "VPIPT",
 };
 
 unsigned long __icache_flags;
@@ -334,10 +334,11 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
 	case ICACHE_POLICY_VPIPT:
 		set_bit(ICACHEF_VPIPT, &__icache_flags);
 		break;
-	default:
+	case ICACHE_POLICY_RESERVED:
+	case ICACHE_POLICY_VIPT:
 		/* Assume aliasing */
 		set_bit(ICACHEF_ALIASING, &__icache_flags);
+		break;
 	}
 
 	pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
......
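The l1ip value driving the switch above is the CTR_EL0.L1Ip field (bits [15:14]), extracted with the CTR_L1IP() accessor from the cache.h hunk earlier. A minimal hedged sketch of where it comes from:

```c
/* Sketch: read CTR_EL0 and extract the L1 I-cache indexing policy. */
static unsigned int l1_icache_policy(void)
{
	u64 ctr;

	asm volatile("mrs %0, ctr_el0" : "=r"(ctr));
	return CTR_L1IP(ctr);	/* ICACHE_POLICY_{VPIPT,RESERVED,VIPT,PIPT} */
}
```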
@@ -147,6 +147,6 @@ efi_debug_entry:
 	 * correctly at this alignment, we must ensure that .text is
 	 * placed at a 4k boundary in the Image to begin with.
 	 */
-	.align	12
+	.balign	SEGMENT_ALIGN
 efi_header_end:
 	.endm
@@ -365,6 +365,9 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
 	br	x30
 #endif
 	.else
+	/* Ensure any device/NC reads complete */
+	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
+
 	eret
 	.endif
 	sb
......
@@ -135,8 +135,6 @@ static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
 	return SPECTRE_VULNERABLE;
 }
 
-#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED	(1)
-
 static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
 {
 	int ret;
......
@@ -222,6 +222,7 @@ asmlinkage notrace void secondary_start_kernel(void)
 	if (system_uses_irq_prio_masking())
 		init_gic_priority_masking();
 
+	rcu_cpu_starting(cpu);
 	preempt_disable();
 	trace_hardirqs_off();
......
@@ -22,16 +22,21 @@ endif
 CC_COMPAT ?= $(CC)
 CC_COMPAT += $(CC_COMPAT_CLANG_FLAGS)
+
+ifneq ($(LLVM),)
+LD_COMPAT ?= $(LD)
+else
+LD_COMPAT ?= $(CROSS_COMPILE_COMPAT)ld
+endif
 else
 CC_COMPAT ?= $(CROSS_COMPILE_COMPAT)gcc
+LD_COMPAT ?= $(CROSS_COMPILE_COMPAT)ld
 endif
 
 cc32-option = $(call try-run,\
         $(CC_COMPAT) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
 cc32-disable-warning = $(call try-run,\
         $(CC_COMPAT) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
-cc32-ldoption = $(call try-run,\
-        $(CC_COMPAT) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
 cc32-as-instr = $(call try-run,\
 	printf "%b\n" "$(1)" | $(CC_COMPAT) $(VDSO_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
@@ -122,14 +127,10 @@ dmbinstr := $(call cc32-as-instr,dmb ishld,-DCONFIG_AS_DMB_ISHLD=1)
 VDSO_CFLAGS += $(dmbinstr)
 VDSO_AFLAGS += $(dmbinstr)
 
 VDSO_LDFLAGS := $(VDSO_CPPFLAGS)
 # From arm vDSO Makefile
-VDSO_LDFLAGS += -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1
-VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
-VDSO_LDFLAGS += -nostdlib -shared -mfloat-abi=soft
-VDSO_LDFLAGS += -Wl,--hash-style=sysv
-VDSO_LDFLAGS += -Wl,--build-id=sha1
-VDSO_LDFLAGS += $(call cc32-ldoption,-fuse-ld=bfd)
+VDSO_LDFLAGS += -Bsymbolic --no-undefined -soname=linux-vdso.so.1
+VDSO_LDFLAGS += -z max-page-size=4096 -z common-page-size=4096
+VDSO_LDFLAGS += -nostdlib -shared --hash-style=sysv --build-id=sha1
@@ -189,8 +190,8 @@ quiet_cmd_vdsold_and_vdso_check = LD32    $@
       cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check)
 
 quiet_cmd_vdsold = LD32    $@
-      cmd_vdsold = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \
-                   -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
+      cmd_vdsold = $(LD_COMPAT) $(VDSO_LDFLAGS) \
+                   -T $(filter %.lds,$^) $(filter %.o,$^) -o $@
 quiet_cmd_vdsocc = CC32    $@
       cmd_vdsocc = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $<
 quiet_cmd_vdsocc_gettimeofday = CC32    $@
......
@@ -278,7 +278,7 @@ SECTIONS
 	 * explicitly check instead of blindly discarding.
 	 */
 	.plt : {
-		*(.plt) *(.plt.*) *(.iplt) *(.igot)
+		*(.plt) *(.plt.*) *(.iplt) *(.igot .igot.plt)
 	}
 	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
......
@@ -1719,7 +1719,8 @@ int kvm_arch_init(void *opaque)
 		return -ENODEV;
 	}
 
-	if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE))
+	if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
+	    cpus_have_final_cap(ARM64_WORKAROUND_1508412))
 		kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
 			 "Only trusted guests should be used on this system.\n");
......
@@ -140,9 +140,9 @@ static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
 	 * We do need to save/restore PAR_EL1 though, as we haven't
 	 * saved the guest context yet, and we may return early...
 	 */
-	par = read_sysreg(par_el1);
+	par = read_sysreg_par();
 	if (!__kvm_at("s1e1r", far))
-		tmp = read_sysreg(par_el1);
+		tmp = read_sysreg_par();
 	else
 		tmp = SYS_PAR_EL1_F; /* back to the guest */
 	write_sysreg(par, par_el1);
@@ -421,7 +421,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
 	    kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
 	    handle_tx2_tvm(vcpu))
-		return true;
+		goto guest;
 
 	/*
 	 * We trap the first access to the FP/SIMD to save the host context
@@ -431,13 +431,13 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 	 * Similarly for trapped SVE accesses.
 	 */
 	if (__hyp_handle_fpsimd(vcpu))
-		return true;
+		goto guest;
 
 	if (__hyp_handle_ptrauth(vcpu))
-		return true;
+		goto guest;
 
 	if (!__populate_fault_info(vcpu))
-		return true;
+		goto guest;
 
 	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
 		bool valid;
@@ -452,7 +452,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 			int ret = __vgic_v2_perform_cpuif_access(vcpu);
 
 			if (ret == 1)
-				return true;
+				goto guest;
 
 			/* Promote an illegal access to an SError.*/
 			if (ret == -1)
@@ -468,12 +468,17 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 		int ret = __vgic_v3_perform_cpuif_access(vcpu);
 
 		if (ret == 1)
-			return true;
+			goto guest;
 	}
 
 exit:
 	/* Return to the host kernel and handle the exit */
 	return false;
+
+guest:
+	/* Re-enter the guest */
+	asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));
+	return true;
 }
 
 static inline void __kvm_unexpected_el2_exception(void)
......
@@ -43,7 +43,7 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 	ctxt_sys_reg(ctxt, CONTEXTIDR_EL1) = read_sysreg_el1(SYS_CONTEXTIDR);
 	ctxt_sys_reg(ctxt, AMAIR_EL1)	= read_sysreg_el1(SYS_AMAIR);
 	ctxt_sys_reg(ctxt, CNTKCTL_EL1)	= read_sysreg_el1(SYS_CNTKCTL);
-	ctxt_sys_reg(ctxt, PAR_EL1)	= read_sysreg(par_el1);
+	ctxt_sys_reg(ctxt, PAR_EL1)	= read_sysreg_par();
 	ctxt_sys_reg(ctxt, TPIDR_EL1)	= read_sysreg(tpidr_el1);
 	ctxt_sys_reg(ctxt, SP_EL1)	= read_sysreg(sp_el1);
......
@@ -250,7 +250,7 @@ void __noreturn hyp_panic(void)
 {
 	u64 spsr = read_sysreg_el2(SYS_SPSR);
 	u64 elr = read_sysreg_el2(SYS_ELR);
-	u64 par = read_sysreg(par_el1);
+	u64 par = read_sysreg_par();
 	bool restore_host = true;
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_vcpu *vcpu;
......
@@ -215,7 +215,7 @@ void __noreturn hyp_panic(void)
 {
 	u64 spsr = read_sysreg_el2(SYS_SPSR);
 	u64 elr = read_sysreg_el2(SYS_ELR);
-	u64 par = read_sysreg(par_el1);
+	u64 par = read_sysreg_par();
 
 	__hyp_call_panic(spsr, elr, par);
 	unreachable();
......
@@ -31,7 +31,7 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
 			val = SMCCC_RET_SUCCESS;
 			break;
 		case SPECTRE_UNAFFECTED:
-			val = SMCCC_RET_NOT_REQUIRED;
+			val = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
 			break;
 		}
 		break;
......
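The distinction matters because the SMCCC v1.1 discovery contract for ARCH_WORKAROUND_1 uses 0 for "affected, firmware workaround implemented" and 1 for "not affected", whereas SMCCC_RET_NOT_REQUIRED (-2) belongs to a different query and confuses guests probing for Spectre-v2 state. A hedged guest-side sketch of interpreting the result (the function name is hypothetical; the constants are the real ones from the arm-smccc.h hunk below):

```c
#include <linux/arm-smccc.h>

/* Hypothetical guest-side interpretation of the hypercall return value. */
static bool spectre_v2_needs_mitigation(long ret)
{
	switch (ret) {
	case SMCCC_RET_SUCCESS:				/* 0: affected, firmware helps */
		return true;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:	/* 1: CPU not affected */
		return false;
	default:					/* negative: no data, assume vulnerable */
		return true;
	}
}
```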
@@ -95,7 +95,7 @@ static bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
 	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
 	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
 	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
-	case PAR_EL1:		*val = read_sysreg_s(SYS_PAR_EL1);	break;
+	case PAR_EL1:		*val = read_sysreg_par();		break;
 	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
 	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
 	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
......
@@ -56,9 +56,8 @@
 	stp \reg1, \reg2, [\ptr], \val
 	.endm
 
-	.weak memcpy
 SYM_FUNC_START_ALIAS(__memcpy)
-SYM_FUNC_START_PI(memcpy)
+SYM_FUNC_START_WEAK_PI(memcpy)
 #include "copy_template.S"
 	ret
 SYM_FUNC_END_PI(memcpy)
......
@@ -45,9 +45,8 @@ C_h	.req	x12
 D_l	.req	x13
 D_h	.req	x14
 
-	.weak memmove
 SYM_FUNC_START_ALIAS(__memmove)
-SYM_FUNC_START_PI(memmove)
+SYM_FUNC_START_WEAK_PI(memmove)
 	cmp	dstin, src
 	b.lo	__memcpy
 	add	tmp1, src, count
......
@@ -42,9 +42,8 @@ dst	.req	x8
 tmp3w	.req	w9
 tmp3	.req	x9
 
-	.weak memset
 SYM_FUNC_START_ALIAS(__memset)
-SYM_FUNC_START_PI(memset)
+SYM_FUNC_START_WEAK_PI(memset)
 	mov	dst, dstin	/* Preserve return value. */
 	and	A_lw, val, #255
 	orr	A_lw, A_lw, A_lw, lsl #8
......
@@ -262,7 +262,7 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
 	local_irq_save(flags);
 	asm volatile("at s1e1r, %0" :: "r" (addr));
 	isb();
-	par = read_sysreg(par_el1);
+	par = read_sysreg_par();
 	local_irq_restore(flags);
 
 	/*
......
@@ -87,6 +87,8 @@
 			   ARM_SMCCC_SMC_32,				\
 			   0, 0x7fff)
 
+#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED	1
+
 /* Paravirtualised time calls (defined by ARM DEN0057A) */
 #define ARM_SMCCC_HV_PV_TIME_FEATURES				\
 	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,			\
......
@@ -740,7 +740,7 @@ static inline bool within_module(unsigned long addr, const struct module *mod)
 }
 
 /* Get/put a kernel symbol (calls should be symmetric) */
-#define symbol_get(x) ({ extern typeof(x) x __attribute__((weak)); &(x); })
+#define symbol_get(x) ({ extern typeof(x) x __attribute__((weak,visibility("hidden"))); &(x); })
 #define symbol_put(x) do { } while (0)
 #define symbol_put_addr(x) do { } while (0)
......
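The visibility change matters on arm64 because a default-visibility weak extern may be reached through a GOT entry, which is exactly what the vmlinux.lds.S assertion above trips over; hidden visibility forces a direct PC-relative reference that resolves to literal zero when the symbol is absent. A minimal sketch of the pattern (optional_hook() is hypothetical):

```c
/* With hidden visibility the compiler emits a direct PC-relative reference
 * instead of a GOT (.igot.plt) indirection for this weak symbol. */
extern void optional_hook(void) __attribute__((weak, visibility("hidden")));

void call_hook_if_present(void)
{
	if (optional_hook)	/* address is 0 if the symbol is undefined */
		optional_hook();
}
```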
@@ -417,6 +417,9 @@ int main(int argc, char *argv[])
 	/* Register SIGSEGV handler */
 	mte_register_signal(SIGSEGV, mte_default_handler);
 
+	/* Set test plan */
+	ksft_set_plan(20);
+
 	/* Buffer by byte tests */
 	evaluate_test(check_buffer_by_byte(USE_MMAP, MTE_SYNC_ERR),
 		"Check buffer correctness by byte with sync err mode and mmap memory\n");
......
@@ -163,6 +163,9 @@ int main(int argc, char *argv[])
 	mte_register_signal(SIGSEGV, mte_default_handler);
 	mte_register_signal(SIGBUS, mte_default_handler);
 
+	/* Set test plan */
+	ksft_set_plan(12);
+
 	evaluate_test(check_child_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE),
 		"Check child anonymous memory with private mapping, precise mode and mmap memory\n");
 	evaluate_test(check_child_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED),
......
@@ -140,6 +140,10 @@ int main(int argc, char *argv[])
 	/* Register signal handlers */
 	mte_register_signal(SIGBUS, mte_default_handler);
 	mte_register_signal(SIGSEGV, mte_default_handler);
+
+	/* Set test plan */
+	ksft_set_plan(4);
 
 	/* Enable KSM */
 	mte_ksm_setup();
......
@@ -205,7 +205,11 @@ int main(int argc, char *argv[])
 	mte_register_signal(SIGBUS, mte_default_handler);
 	mte_register_signal(SIGSEGV, mte_default_handler);
 
+	/* Set test plan */
+	ksft_set_plan(22);
+
 	mte_enable_pstate_tco();
+
 	evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_OFF),
 		"Check anonymous memory with private mapping, sync error mode, mmap memory and tag check off\n");
 	evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_OFF),
......
@@ -170,6 +170,9 @@ int main(int argc, char *argv[])
 	/* Register SIGSEGV handler */
 	mte_register_signal(SIGSEGV, mte_default_handler);
 
+	/* Set test plan */
+	ksft_set_plan(4);
+
 	evaluate_test(check_single_included_tags(USE_MMAP, MTE_SYNC_ERR),
 		"Check an included tag value with sync mode\n");
 	evaluate_test(check_multiple_included_tags(USE_MMAP, MTE_SYNC_ERR),
......
@@ -92,9 +92,13 @@ int main(int argc, char *argv[])
 	err = mte_default_setup();
 	if (err)
 		return err;
+
 	/* Register signal handlers */
 	mte_register_signal(SIGSEGV, mte_default_handler);
 
+	/* Set test plan */
+	ksft_set_plan(4);
+
 	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE),
 		"Check memory access from kernel in sync mode, private mapping and mmap memory\n");
 	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED),
......
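All six selftest fixes follow the same pattern: declare the number of expected results up front so the TAP output can distinguish "all tests ran" from "the harness exited early". A minimal sketch of the kselftest plan API, assuming the usual tools/testing/selftests header location used by the MTE tests:

```c
#include "../../kselftest.h"	/* relative path as used by the arm64 MTE tests */

int main(void)
{
	ksft_print_header();
	ksft_set_plan(2);	/* must equal the number of results reported below */

	ksft_test_result_pass("first check\n");
	ksft_test_result_skip("second check\n");

	return ksft_exit_pass();	/* prints the summary and exits */
}
```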