diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 8e618fcf1f7c9f842873988ab4cb91e3fb5078fd..6a77c63540f7585477229f27fc1797bc1bdd5719 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -21,7 +21,7 @@
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 
-extern unsigned long sme_me_mask;
+extern u64 sme_me_mask;
 
 void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr,
 			 unsigned long decrypted_kernel_vaddr,
@@ -49,7 +49,7 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size);
 
 #else	/* !CONFIG_AMD_MEM_ENCRYPT */
 
-#define sme_me_mask	0UL
+#define sme_me_mask	0ULL
 
 static inline void __init sme_early_encrypt(resource_size_t paddr,
 					    unsigned long size) { }
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index 831eb78955352bcded4422a8584d7becaf6ba15c..c471ca1f9412bdb021b0c27c00fb9fbfd8bebc90 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -86,7 +86,6 @@ static inline void e820__memblock_alloc_reserved_mpc_new(void) { }
 #endif
 
 int generic_processor_info(int apicid, int version);
-int __generic_processor_info(int apicid, int version, bool enabled);
 
 #define PHYSID_ARRAY_SIZE	BITS_TO_LONGS(MAX_LOCAL_APIC)
 
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 8315e2f517a7ef609e8d43b924a2734f7320e9bb..d705c769f77d52ce55e4f7d5d32b9853ceb40394 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2130,7 +2130,7 @@ int generic_processor_info(int apicid, int version)
 	 * Since fixing handling of boot_cpu_physical_apicid requires
 	 * another discussion and tests on each platform, we leave it
 	 * for now and here we use read_apic_id() directly in this
-	 * function, __generic_processor_info().
+	 * function, generic_processor_info().
 	 */
 	if (disabled_cpu_apicid != BAD_APICID &&
 	    disabled_cpu_apicid != read_apic_id() &&
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 0fbd092697570c6e38d074fd01b3b99110dfb473..3fcc8e01683bef96b219d65dbdd0315db1f60605 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -37,7 +37,7 @@ static char sme_cmdline_off[] __initdata = "off";
  * reside in the .data section so as not to be zeroed out when the .bss
  * section is later cleared.
  */
-unsigned long sme_me_mask __section(.data) = 0;
+u64 sme_me_mask __section(.data) = 0;
 EXPORT_SYMBOL_GPL(sme_me_mask);
 
 /* Buffer used for early in-place encryption by BSP, no locking needed */
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 1255f09f5e425a293d2da840242fab393a1cbb9e..265a9cd21cb418e4aae78bf6bc476ec463933670 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -21,7 +21,7 @@
 
 #else	/* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
-#define sme_me_mask	0UL
+#define sme_me_mask	0ULL
 
 #endif	/* CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
@@ -30,18 +30,23 @@ static inline bool sme_active(void)
 	return !!sme_me_mask;
 }
 
-static inline unsigned long sme_get_me_mask(void)
+static inline u64 sme_get_me_mask(void)
 {
 	return sme_me_mask;
 }
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * The __sme_set() and __sme_clr() macros are useful for adding or removing
 * the encryption mask from a value (e.g. when dealing with pagetable
 * entries).
 */
-#define __sme_set(x)		((unsigned long)(x) | sme_me_mask)
-#define __sme_clr(x)		((unsigned long)(x) & ~sme_me_mask)
+#define __sme_set(x)		((x) | sme_me_mask)
+#define __sme_clr(x)		((x) & ~sme_me_mask)
+#else
+#define __sme_set(x)		(x)
+#define __sme_clr(x)		(x)
+#endif
 
 #endif	/* __ASSEMBLY__ */
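
Note (not part of the patch): the hunks above widen sme_me_mask and the __sme_set()/__sme_clr() helpers from unsigned long to u64, so the mask keeps its full width on configurations where unsigned long is 32 bits. The standalone C sketch below only illustrates how a caller applies and strips such a mask; the bit position (47), the fake pte value, and the stand-in definitions are invented for the example and are not how the kernel derives them (the real C-bit position comes from CPUID).

/*
 * Userspace sketch, not kernel code: apply/strip an encryption mask the
 * way __sme_set()/__sme_clr() do, with an illustrative bit position.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

typedef uint64_t u64;

/* Stand-in for sme_me_mask; bit 47 is an arbitrary illustrative choice. */
static u64 sme_me_mask = 1ULL << 47;

#define __sme_set(x)	((x) | sme_me_mask)
#define __sme_clr(x)	((x) & ~sme_me_mask)

int main(void)
{
	u64 pte = 0x123456000ULL | 0x63;	/* fake pfn plus flag bits */

	/* With a u64 mask, bit 47 survives even where long is 32 bits. */
	printf("encrypted pte: 0x%" PRIx64 "\n", __sme_set(pte));
	printf("decrypted pte: 0x%" PRIx64 "\n", __sme_clr(__sme_set(pte)));
	return 0;
}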