/*
 * Copyright (C) 2002  David Howells (dhowells@redhat.com)
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_THREAD_INFO_H
#define _ASM_TILE_THREAD_INFO_H

#include <asm/processor.h>
#include <asm/page.h>
#ifndef __ASSEMBLY__

/*
 * Low level task data that assembly code needs immediate access to.
 * The structure is placed at the bottom of the supervisor stack.
 */
struct thread_info {
	struct task_struct	*task;		/* main task structure */
	unsigned long		flags;		/* low level flags */
	unsigned long		status;		/* thread-synchronous flags */
	__u32			homecache_cpu;	/* CPU we are homecached on */
	__u32			cpu;		/* current CPU */
	int			preempt_count;	/* 0 => preemptable,
						   <0 => BUG */

	mm_segment_t		addr_limit;	/* thread address space
						   (KERNEL_DS or USER_DS) */
	struct single_step_state *step_state;	/* single step state
						   (if non-zero) */
40 41 42 43 44
	int			align_ctl;	/* controls unaligned access */
#ifdef __tilegx__
	unsigned long		unalign_jit_tmp[4]; /* temp r0..r3 storage */
	void __user		*unalign_jit_base; /* unalign fixup JIT base */
#endif
C
Chris Metcalf 已提交
45
	bool in_backtrace;			/* currently doing backtrace? */
46 47 48 49 50 51 52 53 54 55 56 57
};

/*
 * macros/functions for gaining access to the thread information structure.
 */
/* Static initializer for the boot (init) task's thread_info. */
#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
	.flags		= 0,			\
	.cpu		= 0,			\
	.preempt_count	= INIT_PREEMPT_COUNT,	\
	.addr_limit	= KERNEL_DS,		\
	.step_state	= NULL,			\
	.align_ctl	= 0,			\
}

#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)

#endif /* !__ASSEMBLY__ */

/* Use at least 8 KB (order 13) of kernel stack, regardless of page size. */
#if PAGE_SIZE < 8192
#define THREAD_SIZE_ORDER (13 - PAGE_SHIFT)
#else
#define THREAD_SIZE_ORDER (0)
#endif

#define THREAD_SIZE_PAGES (1 << THREAD_SIZE_ORDER)

#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define LOG2_THREAD_SIZE (PAGE_SHIFT + THREAD_SIZE_ORDER)

#define STACK_WARN             (THREAD_SIZE/8)

#ifndef __ASSEMBLY__

void arch_release_thread_info(struct thread_info *info);

/* How to get the thread information struct from C. */
register unsigned long stack_pointer __asm__("sp");

/* thread_info lives at the bottom of the stack: mask sp down to it. */
#define current_thread_info() \
  ((struct thread_info *)(stack_pointer & -THREAD_SIZE))

/* Sit on a nap instruction until interrupted. */
extern void smp_nap(void);

/* Enable interrupts racelessly and nap forever: helper for arch_cpu_idle(). */
extern void _cpu_idle(void);

#else /* __ASSEMBLY__ */

/*
 * How to get the thread information struct from assembly.
 * Note that we use different macros since different architectures
 * have different semantics in their "mm" instruction and we would
 * like to guarantee that the macro expands to exactly one instruction.
 */
#ifdef __tilegx__
/* tilegx: mask the value already in "reg" down to the thread_info. */
#define EXTRACT_THREAD_INFO(reg) mm reg, zero, LOG2_THREAD_SIZE, 63
#else
/* tilepro: read sp and mask it down to the thread_info in one insn. */
#define GET_THREAD_INFO(reg) mm reg, sp, zero, LOG2_THREAD_SIZE, 31
#endif

#endif /* !__ASSEMBLY__ */

/*
 * Thread information flags that various assembly files may need to access.
 * Keep flags accessed frequently in low bits, particular since it makes
 * it easier to build constants in assembly.
 */
#define TIF_SIGPENDING		0	/* signal pending */
#define TIF_NEED_RESCHED	1	/* rescheduling necessary */
#define TIF_SINGLESTEP		2	/* restore singlestep on return to
					   user mode */
#define TIF_ASYNC_TLB		3	/* got an async TLB fault in kernel */
#define TIF_SYSCALL_TRACE	4	/* syscall trace active */
#define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
#define TIF_SECCOMP		6	/* secure computing */
#define TIF_MEMDIE		7	/* OOM killer at work */
#define TIF_NOTIFY_RESUME	8	/* callback before returning to user */
#define TIF_SYSCALL_TRACEPOINT	9	/* syscall tracepoint instrumentation */
#define TIF_POLLING_NRFLAG	10	/* idle is polling for TIF_NEED_RESCHED */
#define TIF_NOHZ		11	/* in adaptive nohz mode */

#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
#define _TIF_ASYNC_TLB		(1<<TIF_ASYNC_TLB)
#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP		(1<<TIF_SECCOMP)
#define _TIF_MEMDIE		(1<<TIF_MEMDIE)
#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
#define _TIF_NOHZ		(1<<TIF_NOHZ)

/* Work to do as we loop to exit to user space. */
#define _TIF_WORK_MASK \
	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
	 _TIF_ASYNC_TLB | _TIF_NOTIFY_RESUME)

/* Work to do on any return to user space. */
#define _TIF_ALLWORK_MASK \
	(_TIF_WORK_MASK | _TIF_SINGLESTEP | _TIF_NOHZ)

/* Work to do at syscall entry. */
#define _TIF_SYSCALL_ENTRY_WORK \
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)

/* Work to do at syscall exit. */
#define _TIF_SYSCALL_EXIT_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT)

/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#ifdef __tilegx__
#define TS_COMPAT		0x0001	/* 32-bit compatibility mode */
#endif
#define TS_RESTORE_SIGMASK	0x0008	/* restore signal mask in do_signal */

#ifndef __ASSEMBLY__
#define HAVE_SET_RESTORE_SIGMASK	1
static inline void set_restore_sigmask(void)
{
	struct thread_info *ti = current_thread_info();
	ti->status |= TS_RESTORE_SIGMASK;
177
	WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
178
}
static inline void clear_restore_sigmask(void)
{
	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
}
static inline bool test_restore_sigmask(void)
{
	return current_thread_info()->status & TS_RESTORE_SIGMASK;
}
/*
 * Atomically (w.r.t. this thread only) consume a pending "restore
 * sigmask" request: clear it and report whether it was set.
 */
static inline bool test_and_clear_restore_sigmask(void)
{
	struct thread_info *ti = current_thread_info();
	bool pending = (ti->status & TS_RESTORE_SIGMASK) != 0;

	if (pending)
		ti->status &= ~TS_RESTORE_SIGMASK;
	return pending;
}
#endif	/* !__ASSEMBLY__ */

#endif /* _ASM_TILE_THREAD_INFO_H */