Commit b34006c4 authored by Ard Biesheuvel, committed by Thomas Gleixner

x86/jump_table: Use relative references

Similar to the arm64 case, 64-bit x86 can benefit from using relative
references rather than absolute ones when emitting struct jump_entry
instances. Not only does this reduce the memory footprint of the entries
themselves by 33%, it also removes the need for carrying relocation
metadata on relocatable builds (i.e., for KASLR) which saves a fair
chunk of .init space as well (although the savings are not as dramatic
as on arm64).

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-s390@vger.kernel.org
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Jessica Yu <jeyu@kernel.org>
Link: https://lkml.kernel.org/r/20180919065144.25010-7-ard.biesheuvel@linaro.org
Parent 9fc0f798
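For readers new to this mechanism, here is a minimal sketch (not part of the patch) of the relative entry layout that HAVE_ARCH_JUMP_LABEL_RELATIVE selects, and of how the absolute addresses are recovered at patch time. The struct and helper names follow the generic definitions added earlier in this series; they are repeated here purely as illustration.

/*
 * Illustration only: two 32-bit self-relative offsets plus a
 * pointer-sized relative key. On x86-64 that is 4 + 4 + 8 = 16 bytes
 * per entry instead of the 3 * 8 = 24 bytes of the absolute layout
 * removed below (the 33% saving quoted in the changelog), and every
 * field is PC-relative, so the entries need no relocations under KASLR.
 */
struct jump_entry {
	s32 code;	/* branch site, relative to &entry->code */
	s32 target;	/* jump target, relative to &entry->target */
	long key;	/* &static_key (low bit: branch value),
			 * relative to &entry->key */
};

static inline unsigned long jump_entry_code(const struct jump_entry *entry)
{
	return (unsigned long)&entry->code + entry->code;
}

static inline unsigned long jump_entry_target(const struct jump_entry *entry)
{
	return (unsigned long)&entry->target + entry->target;
}

In the diff below, ".long 1b - ., %l[l_yes] - ." emits the two 32-bit offsets and _ASM_PTR "%c0 + %c1 - ." emits the relative key, which is exactly this layout.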
@@ -119,6 +119,7 @@ config X86
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_HUGE_VMAP		if X86_64 || X86_PAE
 	select HAVE_ARCH_JUMP_LABEL
+	select HAVE_ARCH_JUMP_LABEL_RELATIVE
 	select HAVE_ARCH_KASAN			if X86_64
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS		if MMU
...
@@ -37,7 +37,8 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
 		".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
 		".pushsection __jump_table, \"aw\" \n\t"
 		_ASM_ALIGN "\n\t"
-		_ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
+		".long 1b - ., %l[l_yes] - . \n\t"
+		_ASM_PTR "%c0 + %c1 - .\n\t"
 		".popsection \n\t"
 		: : "i" (key), "i" (branch) : : l_yes);
@@ -53,7 +54,8 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
 		"2:\n\t"
 		".pushsection __jump_table, \"aw\" \n\t"
 		_ASM_ALIGN "\n\t"
-		_ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
+		".long 1b - ., %l[l_yes] - . \n\t"
+		_ASM_PTR "%c0 + %c1 - .\n\t"
 		".popsection \n\t"
 		: : "i" (key), "i" (branch) : : l_yes);
@@ -62,18 +64,6 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
 	return true;
 }
 
-#ifdef CONFIG_X86_64
-typedef u64 jump_label_t;
-#else
-typedef u32 jump_label_t;
-#endif
-
-struct jump_entry {
-	jump_label_t code;
-	jump_label_t target;
-	jump_label_t key;
-};
-
 #else	/* __ASSEMBLY__ */
 
 .macro STATIC_JUMP_IF_TRUE target, key, def
@@ -88,7 +78,8 @@ struct jump_entry {
 	.endif
 	.pushsection __jump_table, "aw"
 	_ASM_ALIGN
-	_ASM_PTR .Lstatic_jump_\@, \target, \key
+	.long .Lstatic_jump_\@ - ., \target - .
+	_ASM_PTR \key - .
 	.popsection
 .endm
@@ -104,7 +95,8 @@ struct jump_entry {
 	.endif
 	.pushsection __jump_table, "aw"
 	_ASM_ALIGN
-	_ASM_PTR .Lstatic_jump_\@, \target, \key + 1
+	.long .Lstatic_jump_\@ - ., \target - .
+	_ASM_PTR \key + 1 - .
 	.popsection
 .endm
...
@@ -30,9 +30,9 @@
 #define EX_ORIG_OFFSET		0
 #define EX_NEW_OFFSET		4
 
-#define JUMP_ENTRY_SIZE		24
+#define JUMP_ENTRY_SIZE		16
 #define JUMP_ORIG_OFFSET	0
-#define JUMP_NEW_OFFSET		8
+#define JUMP_NEW_OFFSET		4
 
 #define ALT_ENTRY_SIZE		13
 #define ALT_ORIG_OFFSET		0
...
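The constants changed above are the only place where objtool hardcodes the __jump_table entry layout, so they have to track the new 16-byte format. As a quick sanity check of that arithmetic, here is a small stand-alone sketch; the struct name is a hypothetical stand-in for the layout sketched earlier, not an objtool type.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-in mirroring the 16-byte relative layout. */
struct rel_jump_entry {
	int32_t code;	/* offset 0 -> JUMP_ORIG_OFFSET 0 */
	int32_t target;	/* offset 4 -> JUMP_NEW_OFFSET  4 */
	int64_t key;	/* offset 8, pointer-sized on x86-64 */
};

_Static_assert(offsetof(struct rel_jump_entry, code) == 0,
	       "JUMP_ORIG_OFFSET stays 0");
_Static_assert(offsetof(struct rel_jump_entry, target) == 4,
	       "JUMP_NEW_OFFSET is now 4");
_Static_assert(sizeof(struct rel_jump_entry) == 16,
	       "JUMP_ENTRY_SIZE is now 16");

With the old layout of three 64-bit absolute values, the same checks would yield 24 and 8, which is what the removed constants encoded.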