/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SWITCH_TO_H
#define _ASM_X86_SWITCH_TO_H

#include <linux/sched/task_stack.h>

struct task_struct; /* one of the stranger aspects of C forward declarations */

/* Assembly side of the context switch; invoked via the switch_to() macro. */
struct task_struct *__switch_to_asm(struct task_struct *prev,
				    struct task_struct *next);

__visible struct task_struct *__switch_to(struct task_struct *prev,
					  struct task_struct *next);

/* This runs on the previous thread's stack. */
static inline void prepare_switch_to(struct task_struct *next)
{
#ifdef CONFIG_VMAP_STACK
	/*
	 * If we switch to a stack that has a top-level paging entry
	 * that is not present in the current mm, the resulting #PF
	 * will be promoted to a double-fault and we'll panic.  Probe
	 * the new stack now so that vmalloc_fault can fix up the page
	 * tables if needed.  This can only happen if we use a stack
	 * in vmap space.
	 *
	 * We assume that the stack is aligned so that it never spans
	 * more than one top-level paging entry.
	 *
	 * To minimize cache pollution, just follow the stack pointer.
	 */
	READ_ONCE(*(unsigned char *)next->thread.sp);
#endif
}

/* Declared asmlinkage: invoked from assembly entry code, not from C. */
asmlinkage void ret_from_fork(void);

/*
 * This is the structure pointed to by thread.sp for an inactive task.  The
 * order of the fields must match the code in __switch_to_asm().
 */
struct inactive_task_frame {
#ifdef CONFIG_X86_64
	/* Callee-saved registers saved/restored across the switch: */
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
#else
	unsigned long flags;
	unsigned long si;
	unsigned long di;
#endif
	unsigned long bx;

	/*
	 * These two fields must be together.  They form a stack frame header,
	 * needed by get_frame_pointer().
	 */
	unsigned long bp;
	unsigned long ret_addr;
};

/*
 * Initial stack contents of a newly forked task: the switch frame
 * restored by __switch_to_asm(), followed by a struct pt_regs.
 */
struct fork_frame {
	struct inactive_task_frame frame;
	struct pt_regs regs;
};

/*
 * Switch context from @prev to @next; @last is assigned the task_struct
 * pointer returned by __switch_to_asm() once control returns here.
 */
#define switch_to(prev, next, last)					\
do {									\
	prepare_switch_to(next);					\
									\
	((last) = __switch_to_asm((prev), (next)));			\
} while (0)

#ifdef CONFIG_X86_32
/*
 * Keep the per-CPU TSS ss1 slot and the SYSENTER_CS MSR in sync with the
 * incoming thread; skip the (expensive) wrmsr when already up to date.
 */
static inline void refresh_sysenter_cs(struct thread_struct *thread)
{
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs))
		return;

	this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
	wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
}
#endif

87
/* This is used when switching tasks or entering/exiting vm86 mode. */
88
static inline void update_task_stack(struct task_struct *task)
89
{
90
	/* sp0 always points to the entry trampoline stack, which is constant: */
91
#ifdef CONFIG_X86_32
92 93 94 95
	if (static_cpu_has(X86_FEATURE_XENPV))
		load_sp0(task->thread.sp0);
	else
		this_cpu_write(cpu_tss_rw.x86_tss.sp1, task->thread.sp0);
96
#else
97 98 99 100 101 102
	/*
	 * x86-64 updates x86_tss.sp1 via cpu_current_top_of_stack. That
	 * doesn't work on x86-32 because sp1 and
	 * cpu_current_top_of_stack have different values (because of
	 * the non-zero stack-padding on 32bit).
	 */
103 104
	if (static_cpu_has(X86_FEATURE_XENPV))
		load_sp0(task_top_of_stack(task));
105
#endif
106
}
107

108 109 110 111 112 113 114 115 116
static inline void kthread_frame_init(struct inactive_task_frame *frame,
				      unsigned long fun, unsigned long arg)
{
	frame->bx = fun;
#ifdef CONFIG_X86_32
	frame->di = arg;
#else
	frame->r12 = arg;
#endif
117 118
}

#endif /* _ASM_X86_SWITCH_TO_H */