Commit b9a95e85 authored by Will Deacon

Revert "arm64: alternative: Allow immediate branch as alternative instruction"

This reverts most of commit fef7f2b2.

It turns out that there are a couple of problems with the way we're
fixing up branch instructions used as part of alternative instruction
sequences:

  (1) If the branch target is also in the alternative sequence, we'll
      generate a branch into the .altinstructions section, which actually
      gets freed (see the sketch after the sign-off tags below).

  (2) The calls to aarch64_insn_{read,write} bring an awful lot more
      code into the patching path (e.g. taking locks, poking the fixmap,
      invalidating the TLB) which isn't actually needed for the early
      patching run under stop_machine, but makes the use of alternative
      sequences extremely fragile (as we can't patch code that could be
      used by the patching code).

Given that no code actually requires alternative patching of immediate
branches, let's remove this support for now and revisit it when we've
got a user. We leave the updated size check, since we really do require
the sequences to be the same length.
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
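
A minimal sketch of the failure mode in (1), assuming the ALTERNATIVE
inline-asm macro from asm/alternative.h; the capability name
ARM64_HAS_SOME_FEATURE and the surrounding function are made up for
illustration:

/*
 * Hypothetical illustration, not part of this patch. The replacement
 * sequence branches to a label inside itself. The reverted fixup code
 * computed the branch target as an absolute address relative to the
 * copy held in .altinstructions, so the patched kernel text ended up
 * branching into .altinstructions, a section that is later freed.
 */
static inline void toggle_feature_path(void)
{
	asm volatile(ALTERNATIVE(
		/* default: two NOPs */
		"nop\n"
		"nop",
		/* replacement: internal forward branch over one insn */
		"b 1f\n"
		"1:	nop",
		ARM64_HAS_SOME_FEATURE));	/* made-up capability */
}

With the byte-for-byte memcpy() this revert restores, such an internal
branch is copied unchanged and still works: a relative branch whose
target sits inside a same-length, same-position sequence needs no
fixup. Only branches out of the sequence would, and per the message
above no code relies on those today.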
Parent 5ebe6afa
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -24,7 +24,6 @@
 #include <asm/cacheflush.h>
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
-#include <asm/insn.h>
 #include <linux/stop_machine.h>
 
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
@@ -34,48 +33,6 @@ struct alt_region {
 	struct alt_instr *end;
 };
 
-/*
- * Decode the imm field of a b/bl instruction, and return the byte
- * offset as a signed value (so it can be used when computing a new
- * branch target).
- */
-static s32 get_branch_offset(u32 insn)
-{
-	s32 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
-
-	/* sign-extend the immediate before turning it into a byte offset */
-	return (imm << 6) >> 4;
-}
-
-static u32 get_alt_insn(u8 *insnptr, u8 *altinsnptr)
-{
-	u32 insn;
-
-	aarch64_insn_read(altinsnptr, &insn);
-
-	/* Stop the world on instructions we don't support... */
-	BUG_ON(aarch64_insn_is_cbz(insn));
-	BUG_ON(aarch64_insn_is_cbnz(insn));
-	BUG_ON(aarch64_insn_is_bcond(insn));
-	/* ... and there is probably more. */
-
-	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
-		enum aarch64_insn_branch_type type;
-		unsigned long target;
-
-		if (aarch64_insn_is_b(insn))
-			type = AARCH64_INSN_BRANCH_NOLINK;
-		else
-			type = AARCH64_INSN_BRANCH_LINK;
-
-		target = (unsigned long)altinsnptr + get_branch_offset(insn);
-		insn = aarch64_insn_gen_branch_imm((unsigned long)insnptr,
-						   target, type);
-	}
-
-	return insn;
-}
-
 static int __apply_alternatives(void *alt_region)
 {
 	struct alt_instr *alt;
@@ -83,9 +40,6 @@ static int __apply_alternatives(void *alt_region)
 	u8 *origptr, *replptr;
 
 	for (alt = region->begin; alt < region->end; alt++) {
-		u32 insn;
-		int i;
-
 		if (!cpus_have_cap(alt->cpufeature))
 			continue;
 
@@ -95,12 +49,7 @@ static int __apply_alternatives(void *alt_region)
 
 		origptr = (u8 *)&alt->orig_offset + alt->orig_offset;
 		replptr = (u8 *)&alt->alt_offset + alt->alt_offset;
-
-		for (i = 0; i < alt->alt_len; i += sizeof(insn)) {
-			insn = get_alt_insn(origptr + i, replptr + i);
-			aarch64_insn_write(origptr + i, insn);
-		}
-
+		memcpy(origptr, replptr, alt->alt_len);
 		flush_icache_range((uintptr_t)origptr,
 				   (uintptr_t)(origptr + alt->alt_len));
 	}
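
One detail worth unpacking from the removed code is the
(imm << 6) >> 4 expression in get_branch_offset(). b/bl encode a
signed 26-bit word offset in bits [25:0]: shifting left by 6 moves the
sign bit (bit 25) up to bit 31, and the arithmetic shift right by 4
then sign-extends the value while leaving a net left shift of 2, i.e.
the multiply-by-4 that converts words to bytes. A standalone C sketch,
not kernel code (it relies on arithmetic right shift of negative
values, which ISO C leaves implementation-defined but GCC and Clang
guarantee):

#include <assert.h>
#include <stdint.h>

static int32_t branch_offset_bytes(uint32_t insn)
{
	uint32_t imm26 = insn & 0x03ffffff;	/* imm26, bits [25:0] */
	/* put the sign bit (bit 25) at bit 31, then arithmetic-shift
	 * right by 4: sign-extends and multiplies by 4 in one go */
	return ((int32_t)(imm26 << 6)) >> 4;
}

int main(void)
{
	assert(branch_offset_bytes(0x14000001) == 4);	/* b .+4 */
	/* imm26 == 0x3ffffff encodes -1 word, i.e. b .-4 */
	assert(branch_offset_bytes(0x17ffffff) == -4);
	return 0;
}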