Commit 529d235a authored by Michael Ellerman

powerpc: Add a proper syscall for switching endianness

We currently have a "special" syscall for switching endianness. This is
syscall number 0x1ebe, which is handled explicitly in the 64-bit syscall
exception entry.

That has a few problems. Firstly, the syscall number is outside the usual range, which confuses various tools; strace, for example, doesn't recognise the syscall at all.

Secondly, it's handled explicitly as a special case in the syscall
exception entry, which is complicated enough without it.

As a first step toward removing the special syscall, we need to add a
regular syscall that implements the same functionality.

The logic is simple: it just toggles the MSR_LE bit in the userspace
MSR. This is the same as the special syscall, with the caveat that the
special syscall clobbers fewer registers.

This version clobbers r9-r12, XER, CTR, and CR0-1,5-7.
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Parent c03e7374
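To make the mechanism concrete, here is a minimal user-space sketch of the MSR_LE toggle described above. The MSR_LE constant matches the powerpc definition (bit 0 of the MSR); the sample MSR value and the program itself are purely illustrative and are not part of this commit. Note that after a real switch_endian call the CPU fetches subsequent instructions in the new byte order, so an actual caller has to follow the sc instruction with code encoded for that endianness.

#include <stdio.h>

/* MSR_LE is the Little-Endian mode bit of the powerpc MSR (bit 0). */
#define MSR_LE 0x1UL

int main(void)
{
	/* Illustrative stand-in for a task's saved user-space MSR value. */
	unsigned long msr = 0x8000000002803033UL;

	printf("before: msr=0x%016lx LE=%lu\n", msr, msr & MSR_LE);
	msr ^= MSR_LE;	/* the same toggle sys_switch_endian applies to regs->msr */
	printf("after:  msr=0x%016lx LE=%lu\n", msr, msr & MSR_LE);

	return 0;
}

The kernel-side implementation in the diff below performs this toggle on current->thread.regs->msr and additionally sets _TIF_RESTOREALL, so that r3 and the non-volatile GPRs saved by ppc_switch_endian are restored on the way back to user space.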
@@ -367,3 +367,4 @@ SYSCALL_SPU(getrandom)
SYSCALL_SPU(memfd_create)
SYSCALL_SPU(bpf)
COMPAT_SYS(execveat)
+PPC64ONLY(switch_endian)
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>

-#define __NR_syscalls		363
+#define __NR_syscalls		364

#define __NR__exit __NR_exit

#define NR_syscalls	__NR_syscalls
...
@@ -385,5 +385,6 @@
#define __NR_memfd_create	360
#define __NR_bpf		361
#define __NR_execveat		362
+#define __NR_switch_endian	363

#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
@@ -356,6 +356,11 @@ _GLOBAL(ppc64_swapcontext)
	bl	sys_swapcontext
	b	.Lsyscall_exit

+_GLOBAL(ppc_switch_endian)
+	bl	save_nvgprs
+	bl	sys_switch_endian
+	b	.Lsyscall_exit
+
_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
...
@@ -121,3 +121,20 @@ long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
	return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low,
			     (u64)len_high << 32 | len_low, advice);
}
+
+long sys_switch_endian(void)
+{
+	struct thread_info *ti;
+
+	current->thread.regs->msr ^= MSR_LE;
+
+	/*
+	 * Set TIF_RESTOREALL so that r3 isn't clobbered on return to
+	 * userspace. That also has the effect of restoring the non-volatile
+	 * GPRs, so we saved them on the way in here.
+	 */
+	ti = current_thread_info();
+	ti->flags |= _TIF_RESTOREALL;
+
+	return 0;
+}
@@ -22,6 +22,7 @@
#define PPC_SYS(func)		.llong DOTSYM(ppc_##func),DOTSYM(ppc_##func)
#define OLDSYS(func)		.llong DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall)
#define SYS32ONLY(func)		.llong DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func)
+#define PPC64ONLY(func)		.llong DOTSYM(ppc_##func),DOTSYM(sys_ni_syscall)
#define SYSX(f, f3264, f32)	.llong DOTSYM(f),DOTSYM(f3264)
#else
#define SYSCALL(func)		.long sys_##func
@@ -29,6 +30,7 @@
#define PPC_SYS(func)		.long ppc_##func
#define OLDSYS(func)		.long sys_##func
#define SYS32ONLY(func)		.long sys_##func
+#define PPC64ONLY(func)		.long sys_ni_syscall
#define SYSX(f, f3264, f32)	.long f32
#endif
#define SYSCALL_SPU(func)	SYSCALL(func)
...
@@ -21,9 +21,11 @@
#ifdef CONFIG_PPC64
#define OLDSYS(func)		-1
#define SYS32ONLY(func)		-1
+#define PPC64ONLY(func)		__NR_##func
#else
#define OLDSYS(func)		__NR_old##func
#define SYS32ONLY(func)		__NR_##func
+#define PPC64ONLY(func)		-1
#endif

#define SYSX(f, f3264, f32)	-1
...
@@ -39,6 +39,7 @@ static void *spu_syscall_table[] = {
#define PPC_SYS(func)		sys_ni_syscall,
#define OLDSYS(func)		sys_ni_syscall,
#define SYS32ONLY(func)		sys_ni_syscall,
+#define PPC64ONLY(func)		sys_ni_syscall,
#define SYSX(f, f3264, f32)	sys_ni_syscall,

#define SYSCALL_SPU(func)	sys_##func,
...