#ifndef __ASM_X86_MSR_H_
#define __ASM_X86_MSR_H_

#include <asm/msr-index.h>

#ifndef __ASSEMBLY__
# include <linux/types.h>
#endif

#ifdef __KERNEL__
#ifndef __ASSEMBLY__
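/*
 * RDTSCP reads the TSC and the IA32_TSC_AUX MSR in one instruction; it
 * is written as raw opcode bytes (0f 01 f9) below so that assemblers
 * which do not know the mnemonic can still build this file.
 */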
static inline unsigned long long native_read_tscp(int *aux)
{
	unsigned long low, high;
	asm volatile (".byte 0x0f,0x01,0xf9"
		      : "=a" (low), "=d" (high), "=c" (*aux));
	return low | ((u64)high << 32);
}

#define rdtscp(low, high, aux)						\
	do {								\
		unsigned long long _val = native_read_tscp(&(aux));	\
		(low) = (u32)_val;					\
		(high) = (u32)(_val >> 32);				\
	} while (0)

#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux))
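/*
 * Example usage (illustrative only; "cpu_sig" is a made-up name): read
 * the TSC together with the value the OS keeps in IA32_TSC_AUX, e.g.
 * to detect migration between two reads:
 *
 *	unsigned long long t;
 *	int cpu_sig;
 *	rdtscpll(t, cpu_sig);
 */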
#endif  /* !__ASSEMBLY__ */
#endif  /* __KERNEL__ */

#ifdef __i386__

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <asm/asm.h>
#include <asm/errno.h>

static inline unsigned long long native_read_msr(unsigned int msr)
{
	unsigned long long val;

	asm volatile("rdmsr" : "=A" (val) : "c" (msr));
	return val;
}
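
/*
 * The _safe variant catches the #GP fault that rdmsr raises for an
 * invalid MSR: the __ex_table entry redirects a fault at label 2 to
 * the fixup at label 3, which stores -EFAULT in the error output and
 * jumps back past the faulting instruction.
 */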

static inline unsigned long long native_read_msr_safe(unsigned int msr,
						      int *err)
{
	unsigned long long val;

	asm volatile("2: rdmsr ; xor %0,%0\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3:  mov %3,%0 ; jmp 1b\n\t"
		     ".previous\n\t"
		     ".section __ex_table,\"a\"\n"
		     _ASM_ALIGN "\n\t"
		     _ASM_PTR " 2b,3b\n\t"
		     ".previous"
		     : "=r" (*err), "=A" (val)
		     : "c" (msr), "i" (-EFAULT));

	return val;
}

static inline void native_write_msr(unsigned int msr,
				    unsigned low, unsigned high)
{
	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high));
}

static inline int native_write_msr_safe(unsigned int msr,
					unsigned low, unsigned high)
{
	int err;
	asm volatile("2: wrmsr ; xor %0,%0\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3:  mov %4,%0 ; jmp 1b\n\t"
		     ".previous\n\t"
		     ".section __ex_table,\"a\"\n"
		     _ASM_ALIGN "\n\t"
		     _ASM_PTR " 2b,3b\n\t"
		     ".previous"
		     : "=a" (err)
		     : "c" (msr), "0" (low), "d" (high),
		       "i" (-EFAULT));
	return err;
}

static inline unsigned long long native_read_tsc(void)
{
	unsigned long long val;
	asm volatile("rdtsc" : "=A" (val));
	return val;
}

static inline unsigned long long native_read_pmc(int counter)
{
	unsigned long long val;
	asm volatile("rdpmc" : "=A" (val) : "c" (counter));
	return val;
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only)
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection), this allows gcc to optimize better
 */

#define rdmsr(msr,val1,val2)						\
	do {								\
		u64 __val = native_read_msr(msr);			\
		(val1) = (u32)__val;					\
		(val2) = (u32)(__val >> 32);				\
	} while(0)

static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
{
	native_write_msr(msr, low, high);
}

#define rdmsrl(msr,val)							\
	((val) = native_read_msr(msr))

#define wrmsrl(msr, val) native_write_msr(msr, (u32)(val), (u32)((u64)(val) >> 32))
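
/*
 * Example usage (illustrative): MSR 0x10 is IA32_TIME_STAMP_COUNTER.
 *
 *	u32 lo, hi;
 *	u64 t;
 *	rdmsr(0x10, lo, hi);
 *	rdmsrl(0x10, t);
 */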

/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
{
	return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr,p1,p2)						\
	({								\
		int __err;						\
		u64 __val = native_read_msr_safe(msr, &__err);		\
		(*p1) = (u32)__val;					\
		(*p2) = (u32)(__val >> 32);				\
		__err;							\
	})
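
/*
 * Example usage (illustrative; "msr" stands for any MSR number): probe
 * an MSR that may not be implemented; a non-zero return means the read
 * faulted and was fixed up.
 *
 *	u32 lo, hi;
 *	if (rdmsr_safe(msr, &lo, &hi))
 *		return -ENODEV;
 */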

#define rdtscl(low)						\
	((low) = (u32)native_read_tsc())

#define rdtscll(val)						\
	((val) = native_read_tsc())
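
/* MSR 0x10 is IA32_TIME_STAMP_COUNTER; write_tsc() reloads it. */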

#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)

#define rdpmc(counter,low,high)					\
	do {							\
		u64 _l = native_read_pmc(counter);		\
		(low)  = (u32)_l;				\
		(high) = (u32)(_l >> 32);			\
	} while(0)
#endif	/* !CONFIG_PARAVIRT */

#endif  /* ! __ASSEMBLY__ */
#endif  /* __KERNEL__ */

#else   /* __i386__ */

#ifndef __ASSEMBLY__
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only)
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection), this allows gcc to optimize better
 */

#define rdmsr(msr,val1,val2) \
       __asm__ __volatile__("rdmsr" \
			    : "=a" (val1), "=d" (val2) \
			    : "c" (msr))


#define rdmsrl(msr,val) do { unsigned long a__,b__; \
       __asm__ __volatile__("rdmsr" \
			    : "=a" (a__), "=d" (b__) \
			    : "c" (msr)); \
       (val) = a__ | (b__<<32); \
} while(0)
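
/*
 * Unlike the i386 code above, the 64-bit variants compose the result
 * from %eax/%edx by hand: the "=A" constraint cannot be used here, as
 * in 64-bit mode it does not name the %edx:%eax pair for a 64-bit
 * operand.
 */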

#define wrmsr(msr,val1,val2) \
     __asm__ __volatile__("wrmsr" \
			  : /* no outputs */ \
			  : "c" (msr), "a" (val1), "d" (val2))

#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)

#define rdtsc(low,high) \
     __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))

#define rdtscl(low) \
     __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")


#define rdtscll(val) do { \
     unsigned int __a,__d; \
     __asm__ __volatile__("rdtsc" : "=a" (__a), "=d" (__d)); \
     (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
} while(0)

#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)

#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
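
/* 0xc0000103 is MSR_TSC_AUX, the value RDTSCP returns in %ecx. */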

#define rdpmc(counter,low,high) \
     __asm__ __volatile__("rdpmc" \
			  : "=a" (low), "=d" (high) \
			  : "c" (counter))


#ifdef __KERNEL__

/* wrmsr with exception handling */
#define wrmsr_safe(msr,a,b) ({ int ret__;			\
	asm volatile("2: wrmsr ; xorl %0,%0\n"			\
		     "1:\n\t"					\
		     ".section .fixup,\"ax\"\n\t"		\
		     "3:  movl %4,%0 ; jmp 1b\n\t"		\
		     ".previous\n\t"				\
		     ".section __ex_table,\"a\"\n"		\
		     "   .align 8\n\t"				\
		     "   .quad	2b,3b\n\t"			\
		     ".previous"				\
		     : "=a" (ret__)				\
		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
	ret__; })

#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
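
/*
 * Example usage (illustrative): write an MSR that may be unimplemented
 * and fall back gracefully when the write faults.
 *
 *	if (checking_wrmsrl(msr, val))
 *		printk(KERN_WARNING "MSR write failed\n");
 */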

#define rdmsr_safe(msr,a,b) \
	({ int ret__;						\
	  asm volatile ("1:       rdmsr\n"			\
			"2:\n"					\
			".section .fixup,\"ax\"\n"		\
			"3:       movl %4,%0\n"			\
			" jmp 2b\n"				\
			".previous\n"				\
			".section __ex_table,\"a\"\n"		\
			" .align 8\n"				\
			" .quad 1b,3b\n"				\
			".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b)) \
			:"c"(msr), "i"(-EIO), "0"(0));			\
	  ret__; })

#endif  /* __KERNEL__ */

#endif  /* !__ASSEMBLY__ */

#ifndef __ASSEMBLY__

#ifdef CONFIG_SMP
void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
#else  /*  CONFIG_SMP  */
static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
}
static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
#endif  /* CONFIG_SMP */
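
/*
 * Example usage (illustrative): read an MSR on a specific CPU.  The
 * SMP implementations run the access on the target CPU; the UP stubs
 * above simply access the local MSR.
 *
 *	u32 lo, hi;
 *	rdmsr_on_cpu(cpu, MSR_IA32_UCODE_REV, &lo, &hi);
 */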
#endif  /* !__ASSEMBLY__ */
#endif  /* !__i386__ */

#endif /* __ASM_X86_MSR_H_ */