/* thread_info.h: sparc64 low-level thread information
 *
 * Copyright (C) 2002  David S. Miller (davem@redhat.com)
 */

#ifndef _ASM_THREAD_INFO_H
#define _ASM_THREAD_INFO_H

#ifdef __KERNEL__

#define NSWINS		7

#define TI_FLAG_BYTE_FAULT_CODE		0
#define TI_FLAG_FAULT_CODE_SHIFT	56
#define TI_FLAG_BYTE_WSTATE		1
#define TI_FLAG_WSTATE_SHIFT		48
#define TI_FLAG_BYTE_CWP		2
#define TI_FLAG_CWP_SHIFT		40
#define TI_FLAG_BYTE_CURRENT_DS		3
#define TI_FLAG_CURRENT_DS_SHIFT	32
#define TI_FLAG_BYTE_FPDEPTH		4
#define TI_FLAG_FPDEPTH_SHIFT		24
#define TI_FLAG_BYTE_WSAVED		5
#define TI_FLAG_WSAVED_SHIFT		16

#include <asm/page.h>

#ifndef __ASSEMBLY__

#include <asm/ptrace.h>
#include <asm/types.h>

struct task_struct;
struct exec_domain;

struct thread_info {
	/* D$ line 1 */
	struct task_struct	*task;
	unsigned long		flags;
	__u8			fpsaved[7];
	__u8			status;
	unsigned long		ksp;

	/* D$ line 2 */
	unsigned long		fault_address;
	struct pt_regs		*kregs;
	struct exec_domain	*exec_domain;
	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
	__u8			new_child;
	__u8			syscall_noerror;
	__u16			cpu;

	unsigned long		*utraps;

	struct reg_window 	reg_window[NSWINS];
	unsigned long 		rwbuf_stkptrs[NSWINS];

	unsigned long		gsr[7];
	unsigned long		xfsr[7];

	__u64			__user *user_cntd0;
	__u64			__user *user_cntd1;
	__u64			kernel_cntd0, kernel_cntd1;
	__u64			pcr_reg;

	struct restart_block	restart_block;

	struct pt_regs		*kern_una_regs;
	unsigned int		kern_una_insn;

	unsigned long		fpregs[0] __attribute__ ((aligned(64)));
};

#endif /* !(__ASSEMBLY__) */

/* offsets into the thread_info struct for assembly code access */
#define TI_TASK		0x00000000
#define TI_FLAGS	0x00000008
#define TI_FAULT_CODE	(TI_FLAGS + TI_FLAG_BYTE_FAULT_CODE)
#define TI_WSTATE	(TI_FLAGS + TI_FLAG_BYTE_WSTATE)
#define TI_CWP		(TI_FLAGS + TI_FLAG_BYTE_CWP)
#define TI_CURRENT_DS	(TI_FLAGS + TI_FLAG_BYTE_CURRENT_DS)
#define TI_FPDEPTH	(TI_FLAGS + TI_FLAG_BYTE_FPDEPTH)
#define TI_WSAVED	(TI_FLAGS + TI_FLAG_BYTE_WSAVED)
#define TI_FPSAVED	0x00000010
#define TI_KSP		0x00000018
#define TI_FAULT_ADDR	0x00000020
#define TI_KREGS	0x00000028
#define TI_EXEC_DOMAIN	0x00000030
#define TI_PRE_COUNT	0x00000038
#define TI_NEW_CHILD	0x0000003c
#define TI_SYS_NOERROR	0x0000003d
#define TI_CPU		0x0000003e
#define TI_UTRAPS	0x00000040
#define TI_REG_WINDOW	0x00000048
#define TI_RWIN_SPTRS	0x000003c8
#define TI_GSR		0x00000400
#define TI_XFSR		0x00000438
#define TI_USER_CNTD0	0x00000470
#define TI_USER_CNTD1	0x00000478
#define TI_KERN_CNTD0	0x00000480
#define TI_KERN_CNTD1	0x00000488
#define TI_PCR		0x00000490
#define TI_RESTART_BLOCK 0x00000498
#define TI_KUNA_REGS	0x000004c8
#define TI_KUNA_INSN	0x000004d0
#define TI_FPREGS	0x00000500

/* We embed this in the uppermost byte of thread_info->flags */
#define FAULT_CODE_WRITE	0x01	/* Write access, implies D-TLB	   */
#define FAULT_CODE_DTLB		0x02	/* Miss happened in D-TLB	   */
#define FAULT_CODE_ITLB		0x04	/* Miss happened in I-TLB	   */
#define FAULT_CODE_WINFIXUP	0x08	/* Miss happened during spill/fill */
#define FAULT_CODE_BLKCOMMIT	0x10	/* Use blk-commit ASI in copy_page */

#if PAGE_SHIFT == 13
#define THREAD_SIZE (2*PAGE_SIZE)
#define THREAD_SHIFT (PAGE_SHIFT + 1)
#else /* PAGE_SHIFT == 13 */
#define THREAD_SIZE PAGE_SIZE
#define THREAD_SHIFT PAGE_SHIFT
#endif /* PAGE_SHIFT == 13 */
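/*
 * Note: with the usual sparc64 8KB base page (PAGE_SHIFT == 13) this
 * gives a 16KB THREAD_SIZE and a THREAD_SHIFT of 14, matching the
 * __THREAD_INFO_ORDER of 1 used by the allocator below; with larger
 * base pages a single page already holds both the kernel stack and
 * this thread_info (including the fpregs area at TI_FPREGS).
 */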

#define PREEMPT_ACTIVE		0x4000000

/*
 * macros/functions for gaining access to the thread information structure
 *
 * preempt_count needs to be 1 initially, until the scheduler is functional.
 */
#ifndef __ASSEMBLY__

#define INIT_THREAD_INFO(tsk)				\
{							\
	.task		=	&tsk,			\
	.flags		= ((unsigned long)ASI_P) << TI_FLAG_CURRENT_DS_SHIFT,	\
	.exec_domain	=	&default_exec_domain,	\
	.preempt_count	=	1,			\
	.restart_block	= {				\
		.fn	=	do_no_restart_syscall,	\
	},						\
}
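
/*
 * Note: the .flags initializer above simply pre-loads the CURRENT_DS
 * byte with ASI_P, which is the compile-time equivalent of calling
 * set_thread_current_ds(ASI_P) (defined below) at run time.
 */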

#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)

/* how to get the thread information struct from C */
register struct thread_info *current_thread_info_reg asm("g6");
#define current_thread_info()	(current_thread_info_reg)
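/*
 * %g6 is permanently dedicated to the current thread_info pointer while
 * running in the kernel, so current_thread_info() is just a register
 * read and never touches memory.
 */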

/* thread information allocation */
#if PAGE_SHIFT == 13
#define __THREAD_INFO_ORDER	1
#else /* PAGE_SHIFT == 13 */
#define __THREAD_INFO_ORDER	0
#endif /* PAGE_SHIFT == 13 */

#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR

#ifdef CONFIG_DEBUG_STACK_USAGE
#define alloc_thread_info(tsk)					\
({								\
	struct thread_info *ret;				\
								\
	ret = (struct thread_info *)				\
	  __get_free_pages(GFP_KERNEL, __THREAD_INFO_ORDER);	\
	if (ret)						\
		memset(ret, 0, PAGE_SIZE<<__THREAD_INFO_ORDER);	\
	ret;							\
})
#else
#define alloc_thread_info(tsk) \
	((struct thread_info *)__get_free_pages(GFP_KERNEL, __THREAD_INFO_ORDER))
#endif

#define free_thread_info(ti) \
	free_pages((unsigned long)(ti),__THREAD_INFO_ORDER)
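
/*
 * Usage sketch (illustrative; roughly what the generic fork path does
 * once __HAVE_ARCH_THREAD_INFO_ALLOCATOR is defined):
 *
 *	struct thread_info *ti = alloc_thread_info(tsk);
 *	if (!ti)
 *		return NULL;
 *	...
 *	free_thread_info(ti);
 *
 * __THREAD_INFO_ORDER is chosen so the allocation spans exactly
 * THREAD_SIZE bytes; the DEBUG_STACK_USAGE variant zeroes the area so
 * unused stack can later be detected by scanning for non-zero bytes.
 */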

#define __thread_flag_byte_ptr(ti)	\
	((unsigned char *)(&((ti)->flags)))
#define __cur_thread_flag_byte_ptr	__thread_flag_byte_ptr(current_thread_info())

#define get_thread_fault_code()		(__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FAULT_CODE])
#define set_thread_fault_code(val)	(__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FAULT_CODE] = (val))
#define get_thread_wstate()		(__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSTATE])
#define set_thread_wstate(val)		(__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSTATE] = (val))
#define get_thread_cwp()		(__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CWP])
#define set_thread_cwp(val)		(__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CWP] = (val))
#define get_thread_current_ds()		(__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CURRENT_DS])
#define set_thread_current_ds(val)	(__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CURRENT_DS] = (val))
#define get_thread_fpdepth()		(__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FPDEPTH])
#define set_thread_fpdepth(val)		(__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FPDEPTH] = (val))
#define get_thread_wsaved()		(__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSAVED])
#define set_thread_wsaved(val)		(__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSAVED] = (val))
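
/*
 * Illustrative note: sparc64 is big-endian, so byte 0 of ->flags is its
 * most significant byte.  A call such as
 *
 *	set_thread_fault_code(FAULT_CODE_DTLB | FAULT_CODE_WRITE);
 *
 * therefore lands in bits 63:56 (TI_FLAG_FAULT_CODE_SHIFT), and assembly
 * can read the same value back with a single byte load, e.g.
 *
 *	ldub	[%g6 + TI_FAULT_CODE], %g4
 *
 * since TI_FAULT_CODE is just TI_FLAGS plus TI_FLAG_BYTE_FAULT_CODE.
 */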

#endif /* !(__ASSEMBLY__) */

/*
 * Thread information flags, only 16 bits are available as we encode
 * other values into the upper 6 bytes.
 *
 * On trap return we need to test several values:
 *
 * user:	need_resched, notify_resume, sigpending, wsaved, perfctr
 * kernel:	fpdepth
 *
 * So to check for work in the kernel case we simply load the fpdepth
 * byte out of the flags and test it.  For the user case we encode the
 * lower 3 bytes of flags as follows:
 *	----------------------------------------
 *	| wsaved | flags byte 1 | flags byte 2 |
 *	----------------------------------------
 * This optimizes the user test into:
 *	ldx		[%g6 + TI_FLAGS], REG1
 *	sethi		%hi(_TIF_USER_WORK_MASK), REG2
 *	or		REG2, %lo(_TIF_USER_WORK_MASK), REG2
 *	andcc		REG1, REG2, %g0
 *	be,pt		%xcc, no_work_to_do
 *	 nop
 */
#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
#define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
#define TIF_SIGPENDING		2	/* signal pending */
#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
#define TIF_PERFCTR		4	/* performance counters active */
#define TIF_UNALIGNED		5	/* allowed to do unaligned accesses */
/* flag bit 6 is available */
#define TIF_32BIT		7	/* 32-bit binary */
/* flag bit 8 is available */
#define TIF_SECCOMP		9	/* secure computing */
#define TIF_SYSCALL_AUDIT	10	/* syscall auditing active */
/* flag bit 11 is available */
/* NOTE: Thread flags >= 12 should be ones we have no interest
 *       in using in assembly, else we can't use the mask as
 *       an immediate value in instructions such as andcc.
 */
#define TIF_ABI_PENDING		12
#define TIF_MEMDIE		13
#define TIF_POLLING_NRFLAG	14
#define TIF_FREEZE		15	/* is freezing for suspend */

#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
#define _TIF_PERFCTR		(1<<TIF_PERFCTR)
#define _TIF_UNALIGNED		(1<<TIF_UNALIGNED)
#define _TIF_32BIT		(1<<TIF_32BIT)
#define _TIF_SECCOMP		(1<<TIF_SECCOMP)
#define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
#define _TIF_ABI_PENDING	(1<<TIF_ABI_PENDING)
#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
#define _TIF_FREEZE		(1<<TIF_FREEZE)

#define _TIF_USER_WORK_MASK	((0xff << TI_FLAG_WSAVED_SHIFT) | \
				 _TIF_DO_NOTIFY_RESUME_MASK | \
				 _TIF_NEED_RESCHED | _TIF_PERFCTR)
#define _TIF_DO_NOTIFY_RESUME_MASK	(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
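
/*
 * Worked example: with the bit numbers above and a TI_FLAG_WSAVED_SHIFT
 * of 16, _TIF_USER_WORK_MASK evaluates to 0x00ff001e (the wsaved byte
 * plus NOTIFY_RESUME, SIGPENDING, NEED_RESCHED and PERFCTR), small
 * enough to be built with the single sethi/or pair shown in the trap
 * return comment earlier in this file.
 */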

/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 *
 * Note that there are only 8 bits available.
 */
#define TS_RESTORE_SIGMASK	0x0001	/* restore signal mask in do_signal() */

#ifndef __ASSEMBLY__
#define HAVE_SET_RESTORE_SIGMASK	1
static inline void set_restore_sigmask(void)
{
	struct thread_info *ti = current_thread_info();
	ti->status |= TS_RESTORE_SIGMASK;
	set_bit(TIF_SIGPENDING, &ti->flags);
}
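
/*
 * TS_RESTORE_SIGMASK is set together with TIF_SIGPENDING so that the
 * signal delivery path sees pending work and, on finding the status
 * bit, restores the previously saved signal mask rather than the
 * current one.
 */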
#endif	/* !__ASSEMBLY__ */

#endif /* __KERNEL__ */

#endif /* _ASM_THREAD_INFO_H */