/*
 * This program is used to generate definitions needed by
 * assembly language modules.
 *
 * We use the technique used in the OSF Mach kernel code:
 * generate asm statements containing #defines,
 * compile this file to assembler, and then extract the
 * #defines from the assembly-language output.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/hrtimer.h>
#ifdef CONFIG_PPC64
#include <linux/time.h>
#include <linux/hardirq.h>
#endif
#include <linux/kbuild.h>

#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/rtas.h>
#include <asm/vdso_datapage.h>
#include <asm/dbell.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/cache.h>
#include <asm/compat.h>
#include <asm/mmu.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#endif
#ifdef CONFIG_PPC_POWERNV
#include <asm/opal.h>
#endif
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_GUEST)
#include <linux/kvm_host.h>
#endif
#if defined(CONFIG_KVM) && defined(CONFIG_PPC_BOOK3S)
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#endif

#ifdef CONFIG_PPC32
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
#include "head_booke.h"
#endif
#endif

#if defined(CONFIG_PPC_FSL_BOOK3E)
#include "../mm/mmu_decl.h"
#endif

#ifdef CONFIG_PPC_8xx
#include <asm/fixmap.h>
#endif

int main(void)
{
77 78 79
	OFFSET(THREAD, task_struct, thread);
	OFFSET(MM, task_struct, mm);
	OFFSET(MMCONTEXTID, mm_struct, context.id);
80
#ifdef CONFIG_PPC64
81 82
	DEFINE(SIGSEGV, SIGSEGV);
	DEFINE(NMI_MASK, NMI_MASK);
83
	OFFSET(TASKTHREADPPR, task_struct, thread.ppr);
S
Stephen Rothwell 已提交
84
#else
85
	OFFSET(THREAD_INFO, task_struct, stack);
86
	DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
87
	OFFSET(KSP_LIMIT, thread_struct, ksp_limit);
S
Stephen Rothwell 已提交
88 89
#endif /* CONFIG_PPC64 */

90
#ifdef CONFIG_LIVEPATCH
91
	OFFSET(TI_livepatch_sp, thread_info, livepatch_sp);
92 93
#endif

94 95 96
#ifdef CONFIG_CC_STACKPROTECTOR
	DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
#endif
97 98
	OFFSET(KSP, thread_struct, ksp);
	OFFSET(PT_REGS, thread_struct, regs);
99
#ifdef CONFIG_BOOKE
100
	OFFSET(THREAD_NORMSAVES, thread_struct, normsave[0]);
101
#endif
102 103 104 105 106
	OFFSET(THREAD_FPEXC_MODE, thread_struct, fpexc_mode);
	OFFSET(THREAD_FPSTATE, thread_struct, fp_state);
	OFFSET(THREAD_FPSAVEAREA, thread_struct, fp_save_area);
	OFFSET(FPSTATE_FPSCR, thread_fp_state, fpscr);
	OFFSET(THREAD_LOAD_FP, thread_struct, load_fp);
107
#ifdef CONFIG_ALTIVEC
108 109 110 111 112 113
	OFFSET(THREAD_VRSTATE, thread_struct, vr_state);
	OFFSET(THREAD_VRSAVEAREA, thread_struct, vr_save_area);
	OFFSET(THREAD_VRSAVE, thread_struct, vrsave);
	OFFSET(THREAD_USED_VR, thread_struct, used_vr);
	OFFSET(VRSTATE_VSCR, thread_vr_state, vscr);
	OFFSET(THREAD_LOAD_VEC, thread_struct, load_vec);
114
#endif /* CONFIG_ALTIVEC */
115
#ifdef CONFIG_VSX
116
	OFFSET(THREAD_USED_VSR, thread_struct, used_vsr);
117
#endif /* CONFIG_VSX */
S
Stephen Rothwell 已提交
118
#ifdef CONFIG_PPC64
119
	OFFSET(KSP_VSID, thread_struct, ksp_vsid);
S
Stephen Rothwell 已提交
120
#else /* CONFIG_PPC64 */
121
	OFFSET(PGDIR, thread_struct, pgdir);
122
#ifdef CONFIG_SPE
123 124 125 126
	OFFSET(THREAD_EVR0, thread_struct, evr[0]);
	OFFSET(THREAD_ACC, thread_struct, acc);
	OFFSET(THREAD_SPEFSCR, thread_struct, spefscr);
	OFFSET(THREAD_USED_SPE, thread_struct, used_spe);
127
#endif /* CONFIG_SPE */
S
Stephen Rothwell 已提交
128
#endif /* CONFIG_PPC64 */
129
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
130
	OFFSET(THREAD_DBCR0, thread_struct, debug.dbcr0);
131
#endif
132
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
133
	OFFSET(THREAD_KVM_SVCPU, thread_struct, kvm_shadow_vcpu);
134
#endif
135
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
136
	OFFSET(THREAD_KVM_VCPU, thread_struct, kvm_vcpu);
137
#endif
S
Stephen Rothwell 已提交
138

139
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
140 141 142 143 144 145 146 147 148 149 150
	OFFSET(PACATMSCRATCH, paca_struct, tm_scratch);
	OFFSET(THREAD_TM_TFHAR, thread_struct, tm_tfhar);
	OFFSET(THREAD_TM_TEXASR, thread_struct, tm_texasr);
	OFFSET(THREAD_TM_TFIAR, thread_struct, tm_tfiar);
	OFFSET(THREAD_TM_TAR, thread_struct, tm_tar);
	OFFSET(THREAD_TM_PPR, thread_struct, tm_ppr);
	OFFSET(THREAD_TM_DSCR, thread_struct, tm_dscr);
	OFFSET(PT_CKPT_REGS, thread_struct, ckpt_regs);
	OFFSET(THREAD_CKVRSTATE, thread_struct, ckvr_state);
	OFFSET(THREAD_CKVRSAVE, thread_struct, ckvrsave);
	OFFSET(THREAD_CKFPSTATE, thread_struct, ckfp_state);
151 152 153 154
	/* Local pt_regs on stack for Transactional Memory funcs. */
	DEFINE(TM_FRAME_SIZE, STACK_FRAME_OVERHEAD +
	       sizeof(struct pt_regs) + 16);
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
155

156 157 158 159 160
	OFFSET(TI_FLAGS, thread_info, flags);
	OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
	OFFSET(TI_PREEMPT, thread_info, preempt_count);
	OFFSET(TI_TASK, thread_info, task);
	OFFSET(TI_CPU, thread_info, cpu);
S
Stephen Rothwell 已提交
161 162

#ifdef CONFIG_PPC64
163 164 165 166 167 168
	OFFSET(DCACHEL1BLOCKSIZE, ppc64_caches, l1d.block_size);
	OFFSET(DCACHEL1LOGBLOCKSIZE, ppc64_caches, l1d.log_block_size);
	OFFSET(DCACHEL1BLOCKSPERPAGE, ppc64_caches, l1d.blocks_per_page);
	OFFSET(ICACHEL1BLOCKSIZE, ppc64_caches, l1i.block_size);
	OFFSET(ICACHEL1LOGBLOCKSIZE, ppc64_caches, l1i.log_block_size);
	OFFSET(ICACHEL1BLOCKSPERPAGE, ppc64_caches, l1i.blocks_per_page);
S
Stephen Rothwell 已提交
169 170
	/* paca */
	DEFINE(PACA_SIZE, sizeof(struct paca_struct));
171 172 173 174 175 176 177 178 179 180 181 182
	OFFSET(PACAPACAINDEX, paca_struct, paca_index);
	OFFSET(PACAPROCSTART, paca_struct, cpu_start);
	OFFSET(PACAKSAVE, paca_struct, kstack);
	OFFSET(PACACURRENT, paca_struct, __current);
	OFFSET(PACASAVEDMSR, paca_struct, saved_msr);
	OFFSET(PACASTABRR, paca_struct, stab_rr);
	OFFSET(PACAR1, paca_struct, saved_r1);
	OFFSET(PACATOC, paca_struct, kernel_toc);
	OFFSET(PACAKBASE, paca_struct, kernelbase);
	OFFSET(PACAKMSR, paca_struct, kernel_msr);
	OFFSET(PACASOFTIRQEN, paca_struct, soft_enabled);
	OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened);
183
#ifdef CONFIG_PPC_BOOK3S
184
	OFFSET(PACACONTEXTID, paca_struct, mm_ctx_id);
185
#ifdef CONFIG_PPC_MM_SLICES
186 187
	OFFSET(PACALOWSLICESPSIZE, paca_struct, mm_ctx_low_slices_psize);
	OFFSET(PACAHIGHSLICEPSIZE, paca_struct, mm_ctx_high_slices_psize);
188
	DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
189
#endif /* CONFIG_PPC_MM_SLICES */
190
#endif
191 192

#ifdef CONFIG_PPC_BOOK3E
193 194 195 196 197 198 199 200 201 202 203 204 205 206 207
	OFFSET(PACAPGD, paca_struct, pgd);
	OFFSET(PACA_KERNELPGD, paca_struct, kernel_pgd);
	OFFSET(PACA_EXGEN, paca_struct, exgen);
	OFFSET(PACA_EXTLB, paca_struct, extlb);
	OFFSET(PACA_EXMC, paca_struct, exmc);
	OFFSET(PACA_EXCRIT, paca_struct, excrit);
	OFFSET(PACA_EXDBG, paca_struct, exdbg);
	OFFSET(PACA_MC_STACK, paca_struct, mc_kstack);
	OFFSET(PACA_CRIT_STACK, paca_struct, crit_kstack);
	OFFSET(PACA_DBG_STACK, paca_struct, dbg_kstack);
	OFFSET(PACA_TCD_PTR, paca_struct, tcd_ptr);

	OFFSET(TCD_ESEL_NEXT, tlb_core_data, esel_next);
	OFFSET(TCD_ESEL_MAX, tlb_core_data, esel_max);
	OFFSET(TCD_ESEL_FIRST, tlb_core_data, esel_first);
208 209
#endif /* CONFIG_PPC_BOOK3E */

210
#ifdef CONFIG_PPC_STD_MMU_64
211 212 213
	OFFSET(PACASLBCACHE, paca_struct, slb_cache);
	OFFSET(PACASLBCACHEPTR, paca_struct, slb_cache_ptr);
	OFFSET(PACAVMALLOCSLLP, paca_struct, vmalloc_sllp);
214
#ifdef CONFIG_PPC_MM_SLICES
215
	OFFSET(MMUPSIZESLLP, mmu_psize_def, sllp);
216
#else
217
	OFFSET(PACACONTEXTSLLP, paca_struct, mm_ctx_sllp);
218
#endif /* CONFIG_PPC_MM_SLICES */
219 220 221 222 223 224 225 226 227 228 229 230
	OFFSET(PACA_EXGEN, paca_struct, exgen);
	OFFSET(PACA_EXMC, paca_struct, exmc);
	OFFSET(PACA_EXSLB, paca_struct, exslb);
	OFFSET(PACALPPACAPTR, paca_struct, lppaca_ptr);
	OFFSET(PACA_SLBSHADOWPTR, paca_struct, slb_shadow_ptr);
	OFFSET(SLBSHADOW_STACKVSID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid);
	OFFSET(SLBSHADOW_STACKESID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid);
	OFFSET(SLBSHADOW_SAVEAREA, slb_shadow, save_area);
	OFFSET(LPPACA_PMCINUSE, lppaca, pmcregs_in_use);
	OFFSET(LPPACA_DTLIDX, lppaca, dtl_idx);
	OFFSET(LPPACA_YIELDCOUNT, lppaca, yield_count);
	OFFSET(PACA_DTL_RIDX, paca_struct, dtl_ridx);
231
#endif /* CONFIG_PPC_STD_MMU_64 */
232
	OFFSET(PACAEMERGSP, paca_struct, emergency_sp);
233
#ifdef CONFIG_PPC_BOOK3S_64
234 235 236 237 238 239 240 241 242 243 244 245 246
	OFFSET(PACAMCEMERGSP, paca_struct, mc_emergency_sp);
	OFFSET(PACA_IN_MCE, paca_struct, in_mce);
#endif
	OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
	OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
	OFFSET(PACA_DSCR_DEFAULT, paca_struct, dscr_default);
	OFFSET(ACCOUNT_STARTTIME, paca_struct, accounting.starttime);
	OFFSET(ACCOUNT_STARTTIME_USER, paca_struct, accounting.starttime_user);
	OFFSET(ACCOUNT_USER_TIME, paca_struct, accounting.user_time);
	OFFSET(ACCOUNT_SYSTEM_TIME, paca_struct, accounting.system_time);
	OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save);
	OFFSET(PACA_NAPSTATELOST, paca_struct, nap_state_lost);
	OFFSET(PACA_SPRG_VDSO, paca_struct, sprg_vdso);
247 248
#else /* CONFIG_PPC64 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
249 250 251 252
	OFFSET(ACCOUNT_STARTTIME, thread_info, accounting.starttime);
	OFFSET(ACCOUNT_STARTTIME_USER, thread_info, accounting.starttime_user);
	OFFSET(ACCOUNT_USER_TIME, thread_info, accounting.user_time);
	OFFSET(ACCOUNT_SYSTEM_TIME, thread_info, accounting.system_time);
253
#endif
254
#endif /* CONFIG_PPC64 */
S
Stephen Rothwell 已提交
255 256

	/* RTAS */
257 258
	OFFSET(RTASBASE, rtas_t, base);
	OFFSET(RTASENTRY, rtas_t, entry);
S
Stephen Rothwell 已提交
259

260
	/* Interrupt register frame */
K
Kumar Gala 已提交
261
	DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
262
	DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
A
Alexander Graf 已提交
263
#ifdef CONFIG_PPC64
S
Stephen Rothwell 已提交
264 265 266 267
	/* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
	DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
	DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
#endif /* CONFIG_PPC64 */
268 269 270 271 272 273 274 275 276 277 278 279 280 281
	DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
	DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
	DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
	DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
	DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
	DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
	DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
	DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
	DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
	DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
	DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
	DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
	DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
	DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
S
Stephen Rothwell 已提交
282
#ifndef CONFIG_PPC64
283
	DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14]));
S
Stephen Rothwell 已提交
284
#endif /* CONFIG_PPC64 */
285 286 287 288 289 290 291 292 293 294 295 296
	/*
	 * Note: these symbols include _ because they overlap with special
	 * register names
	 */
	DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
	DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
	DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
	DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
	DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
	DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
	DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
	DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
S
Stephen Rothwell 已提交
297 298
	DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
	DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
299
	DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
S
Stephen Rothwell 已提交
300 301 302 303 304 305
#ifndef CONFIG_PPC64
	/*
	 * The PowerPC 400-class & Book-E processors have neither the DAR
	 * nor the DSISR SPRs. Hence, we overload them to hold the similar
	 * DEAR and ESR SPRs for such processors.  For critical interrupts
	 * we use them to hold SRR0 and SRR1.
306 307 308
	 */
	DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
	DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
S
Stephen Rothwell 已提交
309 310 311 312 313 314 315 316
#else /* CONFIG_PPC64 */
	DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));

	/* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
	DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
	DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
#endif /* CONFIG_PPC64 */

317
#if defined(CONFIG_PPC32)
318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
	DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
	/* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */
	DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
	DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1));
	DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2));
	DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3));
	DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6));
	DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7));
	DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0));
	DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1));
	DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0));
	DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
	DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
	DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
	DEFINE(SAVED_KSP_LIMIT, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, saved_ksp_limit));
#endif
336
#endif
S
Stephen Rothwell 已提交
337 338

#ifndef CONFIG_PPC64
339
	OFFSET(MM_PGD, mm_struct, pgd);
S
Stephen Rothwell 已提交
340
#endif /* ! CONFIG_PPC64 */
341 342

	/* About the CPU features table */
343 344 345
	OFFSET(CPU_SPEC_FEATURES, cpu_spec, cpu_features);
	OFFSET(CPU_SPEC_SETUP, cpu_spec, cpu_setup);
	OFFSET(CPU_SPEC_RESTORE, cpu_spec, cpu_restore);
346

347 348 349
	OFFSET(pbe_address, pbe, address);
	OFFSET(pbe_orig_address, pbe, orig_address);
	OFFSET(pbe_next, pbe, next);
350

351
#ifndef CONFIG_PPC64
P
Paul Mackerras 已提交
352
	DEFINE(TASK_SIZE, TASK_SIZE);
S
Stephen Rothwell 已提交
353
	DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
354
#endif /* ! CONFIG_PPC64 */
355

356
	/* datapage offsets for use by vdso */
357 358 359 360 361 362 363 364 365 366 367 368 369 370 371
	OFFSET(CFG_TB_ORIG_STAMP, vdso_data, tb_orig_stamp);
	OFFSET(CFG_TB_TICKS_PER_SEC, vdso_data, tb_ticks_per_sec);
	OFFSET(CFG_TB_TO_XS, vdso_data, tb_to_xs);
	OFFSET(CFG_TB_UPDATE_COUNT, vdso_data, tb_update_count);
	OFFSET(CFG_TZ_MINUTEWEST, vdso_data, tz_minuteswest);
	OFFSET(CFG_TZ_DSTTIME, vdso_data, tz_dsttime);
	OFFSET(CFG_SYSCALL_MAP32, vdso_data, syscall_map_32);
	OFFSET(WTOM_CLOCK_SEC, vdso_data, wtom_clock_sec);
	OFFSET(WTOM_CLOCK_NSEC, vdso_data, wtom_clock_nsec);
	OFFSET(STAMP_XTIME, vdso_data, stamp_xtime);
	OFFSET(STAMP_SEC_FRAC, vdso_data, stamp_sec_fraction);
	OFFSET(CFG_ICACHE_BLOCKSZ, vdso_data, icache_block_size);
	OFFSET(CFG_DCACHE_BLOCKSZ, vdso_data, dcache_block_size);
	OFFSET(CFG_ICACHE_LOGBLOCKSZ, vdso_data, icache_log_block_size);
	OFFSET(CFG_DCACHE_LOGBLOCKSZ, vdso_data, dcache_log_block_size);
372
#ifdef CONFIG_PPC64
373 374 375 376 377 378 379 380 381
	OFFSET(CFG_SYSCALL_MAP64, vdso_data, syscall_map_64);
	OFFSET(TVAL64_TV_SEC, timeval, tv_sec);
	OFFSET(TVAL64_TV_USEC, timeval, tv_usec);
	OFFSET(TVAL32_TV_SEC, compat_timeval, tv_sec);
	OFFSET(TVAL32_TV_USEC, compat_timeval, tv_usec);
	OFFSET(TSPC64_TV_SEC, timespec, tv_sec);
	OFFSET(TSPC64_TV_NSEC, timespec, tv_nsec);
	OFFSET(TSPC32_TV_SEC, compat_timespec, tv_sec);
	OFFSET(TSPC32_TV_NSEC, compat_timespec, tv_nsec);
382
#else
383 384 385 386
	OFFSET(TVAL32_TV_SEC, timeval, tv_sec);
	OFFSET(TVAL32_TV_USEC, timeval, tv_usec);
	OFFSET(TSPC32_TV_SEC, timespec, tv_sec);
	OFFSET(TSPC32_TV_NSEC, timespec, tv_nsec);
387 388
#endif
	/* timeval/timezone offsets for use by vdso */
389 390
	OFFSET(TZONE_TZ_MINWEST, timezone, tz_minuteswest);
	OFFSET(TZONE_TZ_DSTTIME, timezone, tz_dsttime);
391 392 393 394 395

	/* Other bits used by the vdso */
	DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
	DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
	DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
396
	DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
397

398 399 400
#ifdef CONFIG_BUG
	DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
#endif
401

402 403 404
#ifdef MAX_PGD_TABLE_SIZE
	DEFINE(PGD_TABLE_SIZE, MAX_PGD_TABLE_SIZE);
#else
405
	DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
406
#endif
407
	DEFINE(PTE_SIZE, sizeof(pte_t));
408

409
#ifdef CONFIG_KVM
410 411 412 413 414 415
	OFFSET(VCPU_HOST_STACK, kvm_vcpu, arch.host_stack);
	OFFSET(VCPU_HOST_PID, kvm_vcpu, arch.host_pid);
	OFFSET(VCPU_GUEST_PID, kvm_vcpu, arch.pid);
	OFFSET(VCPU_GPRS, kvm_vcpu, arch.gpr);
	OFFSET(VCPU_VRSAVE, kvm_vcpu, arch.vrsave);
	OFFSET(VCPU_FPRS, kvm_vcpu, arch.fp.fpr);
416
#ifdef CONFIG_ALTIVEC
417
	OFFSET(VCPU_VRS, kvm_vcpu, arch.vr.vr);
418
#endif
419 420 421
	OFFSET(VCPU_XER, kvm_vcpu, arch.xer);
	OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr);
	OFFSET(VCPU_LR, kvm_vcpu, arch.lr);
422
#ifdef CONFIG_PPC_BOOK3S
423
	OFFSET(VCPU_TAR, kvm_vcpu, arch.tar);
424
#endif
425 426
	OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
	OFFSET(VCPU_PC, kvm_vcpu, arch.pc);
427
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
428 429 430 431 432 433 434
	OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr);
	OFFSET(VCPU_SRR0, kvm_vcpu, arch.shregs.srr0);
	OFFSET(VCPU_SRR1, kvm_vcpu, arch.shregs.srr1);
	OFFSET(VCPU_SPRG0, kvm_vcpu, arch.shregs.sprg0);
	OFFSET(VCPU_SPRG1, kvm_vcpu, arch.shregs.sprg1);
	OFFSET(VCPU_SPRG2, kvm_vcpu, arch.shregs.sprg2);
	OFFSET(VCPU_SPRG3, kvm_vcpu, arch.shregs.sprg3);
435 436
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458
	OFFSET(VCPU_TB_RMENTRY, kvm_vcpu, arch.rm_entry);
	OFFSET(VCPU_TB_RMINTR, kvm_vcpu, arch.rm_intr);
	OFFSET(VCPU_TB_RMEXIT, kvm_vcpu, arch.rm_exit);
	OFFSET(VCPU_TB_GUEST, kvm_vcpu, arch.guest_time);
	OFFSET(VCPU_TB_CEDE, kvm_vcpu, arch.cede_time);
	OFFSET(VCPU_CUR_ACTIVITY, kvm_vcpu, arch.cur_activity);
	OFFSET(VCPU_ACTIVITY_START, kvm_vcpu, arch.cur_tb_start);
	OFFSET(TAS_SEQCOUNT, kvmhv_tb_accumulator, seqcount);
	OFFSET(TAS_TOTAL, kvmhv_tb_accumulator, tb_total);
	OFFSET(TAS_MIN, kvmhv_tb_accumulator, tb_min);
	OFFSET(TAS_MAX, kvmhv_tb_accumulator, tb_max);
#endif
	OFFSET(VCPU_SHARED_SPRG3, kvm_vcpu_arch_shared, sprg3);
	OFFSET(VCPU_SHARED_SPRG4, kvm_vcpu_arch_shared, sprg4);
	OFFSET(VCPU_SHARED_SPRG5, kvm_vcpu_arch_shared, sprg5);
	OFFSET(VCPU_SHARED_SPRG6, kvm_vcpu_arch_shared, sprg6);
	OFFSET(VCPU_SHARED_SPRG7, kvm_vcpu_arch_shared, sprg7);
	OFFSET(VCPU_SHADOW_PID, kvm_vcpu, arch.shadow_pid);
	OFFSET(VCPU_SHADOW_PID1, kvm_vcpu, arch.shadow_pid1);
	OFFSET(VCPU_SHARED, kvm_vcpu, arch.shared);
	OFFSET(VCPU_SHARED_MSR, kvm_vcpu_arch_shared, msr);
	OFFSET(VCPU_SHADOW_MSR, kvm_vcpu, arch.shadow_msr);
459
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
460
	OFFSET(VCPU_SHAREDBE, kvm_vcpu, arch.shared_big_endian);
461
#endif
462

463 464 465 466 467 468
	OFFSET(VCPU_SHARED_MAS0, kvm_vcpu_arch_shared, mas0);
	OFFSET(VCPU_SHARED_MAS1, kvm_vcpu_arch_shared, mas1);
	OFFSET(VCPU_SHARED_MAS2, kvm_vcpu_arch_shared, mas2);
	OFFSET(VCPU_SHARED_MAS7_3, kvm_vcpu_arch_shared, mas7_3);
	OFFSET(VCPU_SHARED_MAS4, kvm_vcpu_arch_shared, mas4);
	OFFSET(VCPU_SHARED_MAS6, kvm_vcpu_arch_shared, mas6);
469

470 471
	OFFSET(VCPU_KVM, kvm_vcpu, kvm);
	OFFSET(KVM_LPID, kvm, arch.lpid);
472

473
	/* book3s */
474
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490
	OFFSET(KVM_TLB_SETS, kvm, arch.tlb_sets);
	OFFSET(KVM_SDR1, kvm, arch.sdr1);
	OFFSET(KVM_HOST_LPID, kvm, arch.host_lpid);
	OFFSET(KVM_HOST_LPCR, kvm, arch.host_lpcr);
	OFFSET(KVM_HOST_SDR1, kvm, arch.host_sdr1);
	OFFSET(KVM_NEED_FLUSH, kvm, arch.need_tlb_flush.bits);
	OFFSET(KVM_ENABLED_HCALLS, kvm, arch.enabled_hcalls);
	OFFSET(KVM_VRMA_SLB_V, kvm, arch.vrma_slb_v);
	OFFSET(KVM_RADIX, kvm, arch.radix);
	OFFSET(VCPU_DSISR, kvm_vcpu, arch.shregs.dsisr);
	OFFSET(VCPU_DAR, kvm_vcpu, arch.shregs.dar);
	OFFSET(VCPU_VPA, kvm_vcpu, arch.vpa.pinned_addr);
	OFFSET(VCPU_VPA_DIRTY, kvm_vcpu, arch.vpa.dirty);
	OFFSET(VCPU_HEIR, kvm_vcpu, arch.emul_inst);
	OFFSET(VCPU_CPU, kvm_vcpu, cpu);
	OFFSET(VCPU_THREAD_CPU, kvm_vcpu, arch.thread_cpu);
491
#endif
492
#ifdef CONFIG_PPC_BOOK3S
493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551
	OFFSET(VCPU_PURR, kvm_vcpu, arch.purr);
	OFFSET(VCPU_SPURR, kvm_vcpu, arch.spurr);
	OFFSET(VCPU_IC, kvm_vcpu, arch.ic);
	OFFSET(VCPU_DSCR, kvm_vcpu, arch.dscr);
	OFFSET(VCPU_AMR, kvm_vcpu, arch.amr);
	OFFSET(VCPU_UAMOR, kvm_vcpu, arch.uamor);
	OFFSET(VCPU_IAMR, kvm_vcpu, arch.iamr);
	OFFSET(VCPU_CTRL, kvm_vcpu, arch.ctrl);
	OFFSET(VCPU_DABR, kvm_vcpu, arch.dabr);
	OFFSET(VCPU_DABRX, kvm_vcpu, arch.dabrx);
	OFFSET(VCPU_DAWR, kvm_vcpu, arch.dawr);
	OFFSET(VCPU_DAWRX, kvm_vcpu, arch.dawrx);
	OFFSET(VCPU_CIABR, kvm_vcpu, arch.ciabr);
	OFFSET(VCPU_HFLAGS, kvm_vcpu, arch.hflags);
	OFFSET(VCPU_DEC, kvm_vcpu, arch.dec);
	OFFSET(VCPU_DEC_EXPIRES, kvm_vcpu, arch.dec_expires);
	OFFSET(VCPU_PENDING_EXC, kvm_vcpu, arch.pending_exceptions);
	OFFSET(VCPU_CEDED, kvm_vcpu, arch.ceded);
	OFFSET(VCPU_PRODDED, kvm_vcpu, arch.prodded);
	OFFSET(VCPU_MMCR, kvm_vcpu, arch.mmcr);
	OFFSET(VCPU_PMC, kvm_vcpu, arch.pmc);
	OFFSET(VCPU_SPMC, kvm_vcpu, arch.spmc);
	OFFSET(VCPU_SIAR, kvm_vcpu, arch.siar);
	OFFSET(VCPU_SDAR, kvm_vcpu, arch.sdar);
	OFFSET(VCPU_SIER, kvm_vcpu, arch.sier);
	OFFSET(VCPU_SLB, kvm_vcpu, arch.slb);
	OFFSET(VCPU_SLB_MAX, kvm_vcpu, arch.slb_max);
	OFFSET(VCPU_SLB_NR, kvm_vcpu, arch.slb_nr);
	OFFSET(VCPU_FAULT_DSISR, kvm_vcpu, arch.fault_dsisr);
	OFFSET(VCPU_FAULT_DAR, kvm_vcpu, arch.fault_dar);
	OFFSET(VCPU_FAULT_GPA, kvm_vcpu, arch.fault_gpa);
	OFFSET(VCPU_INTR_MSR, kvm_vcpu, arch.intr_msr);
	OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst);
	OFFSET(VCPU_TRAP, kvm_vcpu, arch.trap);
	OFFSET(VCPU_CFAR, kvm_vcpu, arch.cfar);
	OFFSET(VCPU_PPR, kvm_vcpu, arch.ppr);
	OFFSET(VCPU_FSCR, kvm_vcpu, arch.fscr);
	OFFSET(VCPU_PSPB, kvm_vcpu, arch.pspb);
	OFFSET(VCPU_EBBHR, kvm_vcpu, arch.ebbhr);
	OFFSET(VCPU_EBBRR, kvm_vcpu, arch.ebbrr);
	OFFSET(VCPU_BESCR, kvm_vcpu, arch.bescr);
	OFFSET(VCPU_CSIGR, kvm_vcpu, arch.csigr);
	OFFSET(VCPU_TACR, kvm_vcpu, arch.tacr);
	OFFSET(VCPU_TCSCR, kvm_vcpu, arch.tcscr);
	OFFSET(VCPU_ACOP, kvm_vcpu, arch.acop);
	OFFSET(VCPU_WORT, kvm_vcpu, arch.wort);
	OFFSET(VCPU_TID, kvm_vcpu, arch.tid);
	OFFSET(VCPU_PSSCR, kvm_vcpu, arch.psscr);
	OFFSET(VCORE_ENTRY_EXIT, kvmppc_vcore, entry_exit_map);
	OFFSET(VCORE_IN_GUEST, kvmppc_vcore, in_guest);
	OFFSET(VCORE_NAPPING_THREADS, kvmppc_vcore, napping_threads);
	OFFSET(VCORE_KVM, kvmppc_vcore, kvm);
	OFFSET(VCORE_TB_OFFSET, kvmppc_vcore, tb_offset);
	OFFSET(VCORE_LPCR, kvmppc_vcore, lpcr);
	OFFSET(VCORE_PCR, kvmppc_vcore, pcr);
	OFFSET(VCORE_DPDES, kvmppc_vcore, dpdes);
	OFFSET(VCORE_VTB, kvmppc_vcore, vtb);
	OFFSET(VCPU_SLB_E, kvmppc_slb, orige);
	OFFSET(VCPU_SLB_V, kvmppc_slb, origv);
552
	DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
553
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
554 555 556 557 558 559 560 561 562 563 564 565 566 567 568
	OFFSET(VCPU_TFHAR, kvm_vcpu, arch.tfhar);
	OFFSET(VCPU_TFIAR, kvm_vcpu, arch.tfiar);
	OFFSET(VCPU_TEXASR, kvm_vcpu, arch.texasr);
	OFFSET(VCPU_GPR_TM, kvm_vcpu, arch.gpr_tm);
	OFFSET(VCPU_FPRS_TM, kvm_vcpu, arch.fp_tm.fpr);
	OFFSET(VCPU_VRS_TM, kvm_vcpu, arch.vr_tm.vr);
	OFFSET(VCPU_VRSAVE_TM, kvm_vcpu, arch.vrsave_tm);
	OFFSET(VCPU_CR_TM, kvm_vcpu, arch.cr_tm);
	OFFSET(VCPU_XER_TM, kvm_vcpu, arch.xer_tm);
	OFFSET(VCPU_LR_TM, kvm_vcpu, arch.lr_tm);
	OFFSET(VCPU_CTR_TM, kvm_vcpu, arch.ctr_tm);
	OFFSET(VCPU_AMR_TM, kvm_vcpu, arch.amr_tm);
	OFFSET(VCPU_PPR_TM, kvm_vcpu, arch.ppr_tm);
	OFFSET(VCPU_DSCR_TM, kvm_vcpu, arch.dscr_tm);
	OFFSET(VCPU_TAR_TM, kvm_vcpu, arch.tar_tm);
569
#endif
570 571

#ifdef CONFIG_PPC_BOOK3S_64
572
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
573
	OFFSET(PACA_SVCPU, paca_struct, shadow_vcpu);
574
# define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
575 576 577
#else
# define SVCPU_FIELD(x, f)
#endif
578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606
# define HSTATE_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f))
#else	/* 32-bit */
# define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f))
# define HSTATE_FIELD(x, f)	DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, hstate.f))
#endif

	SVCPU_FIELD(SVCPU_CR, cr);
	SVCPU_FIELD(SVCPU_XER, xer);
	SVCPU_FIELD(SVCPU_CTR, ctr);
	SVCPU_FIELD(SVCPU_LR, lr);
	SVCPU_FIELD(SVCPU_PC, pc);
	SVCPU_FIELD(SVCPU_R0, gpr[0]);
	SVCPU_FIELD(SVCPU_R1, gpr[1]);
	SVCPU_FIELD(SVCPU_R2, gpr[2]);
	SVCPU_FIELD(SVCPU_R3, gpr[3]);
	SVCPU_FIELD(SVCPU_R4, gpr[4]);
	SVCPU_FIELD(SVCPU_R5, gpr[5]);
	SVCPU_FIELD(SVCPU_R6, gpr[6]);
	SVCPU_FIELD(SVCPU_R7, gpr[7]);
	SVCPU_FIELD(SVCPU_R8, gpr[8]);
	SVCPU_FIELD(SVCPU_R9, gpr[9]);
	SVCPU_FIELD(SVCPU_R10, gpr[10]);
	SVCPU_FIELD(SVCPU_R11, gpr[11]);
	SVCPU_FIELD(SVCPU_R12, gpr[12]);
	SVCPU_FIELD(SVCPU_R13, gpr[13]);
	SVCPU_FIELD(SVCPU_FAULT_DSISR, fault_dsisr);
	SVCPU_FIELD(SVCPU_FAULT_DAR, fault_dar);
	SVCPU_FIELD(SVCPU_LAST_INST, last_inst);
	SVCPU_FIELD(SVCPU_SHADOW_SRR1, shadow_srr1);
607
#ifdef CONFIG_PPC_BOOK3S_32
608
	SVCPU_FIELD(SVCPU_SR, sr);
609
#endif
610 611 612
#ifdef CONFIG_PPC64
	SVCPU_FIELD(SVCPU_SLB, slb);
	SVCPU_FIELD(SVCPU_SLB_MAX, slb_max);
613
	SVCPU_FIELD(SVCPU_SHADOW_FSCR, shadow_fscr);
614 615 616 617
#endif

	HSTATE_FIELD(HSTATE_HOST_R1, host_r1);
	HSTATE_FIELD(HSTATE_HOST_R2, host_r2);
618
	HSTATE_FIELD(HSTATE_HOST_MSR, host_msr);
619 620 621
	HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
	HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
	HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
622
	HSTATE_FIELD(HSTATE_SCRATCH2, scratch2);
623
	HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
624
	HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
625
	HSTATE_FIELD(HSTATE_NAPPING, napping);
626

627
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
628 629
	HSTATE_FIELD(HSTATE_HWTHREAD_REQ, hwthread_req);
	HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state);
630
	HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
631 632
	HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
	HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
633 634
	HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
	HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
635
	HSTATE_FIELD(HSTATE_PTID, ptid);
636 637 638 639 640 641 642 643 644 645 646 647 648
	HSTATE_FIELD(HSTATE_MMCR0, host_mmcr[0]);
	HSTATE_FIELD(HSTATE_MMCR1, host_mmcr[1]);
	HSTATE_FIELD(HSTATE_MMCRA, host_mmcr[2]);
	HSTATE_FIELD(HSTATE_SIAR, host_mmcr[3]);
	HSTATE_FIELD(HSTATE_SDAR, host_mmcr[4]);
	HSTATE_FIELD(HSTATE_MMCR2, host_mmcr[5]);
	HSTATE_FIELD(HSTATE_SIER, host_mmcr[6]);
	HSTATE_FIELD(HSTATE_PMC1, host_pmc[0]);
	HSTATE_FIELD(HSTATE_PMC2, host_pmc[1]);
	HSTATE_FIELD(HSTATE_PMC3, host_pmc[2]);
	HSTATE_FIELD(HSTATE_PMC4, host_pmc[3]);
	HSTATE_FIELD(HSTATE_PMC5, host_pmc[4]);
	HSTATE_FIELD(HSTATE_PMC6, host_pmc[5]);
649 650 651 652 653
	HSTATE_FIELD(HSTATE_PURR, host_purr);
	HSTATE_FIELD(HSTATE_SPURR, host_spurr);
	HSTATE_FIELD(HSTATE_DSCR, host_dscr);
	HSTATE_FIELD(HSTATE_DABR, dabr);
	HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
654
	HSTATE_FIELD(HSTATE_SPLIT_MODE, kvm_split_mode);
655
	DEFINE(IPI_PRIORITY, IPI_PRIORITY);
656 657 658 659 660
	OFFSET(KVM_SPLIT_RPR, kvm_split_mode, rpr);
	OFFSET(KVM_SPLIT_PMMAR, kvm_split_mode, pmmar);
	OFFSET(KVM_SPLIT_LDBAR, kvm_split_mode, ldbar);
	OFFSET(KVM_SPLIT_DO_NAP, kvm_split_mode, do_nap);
	OFFSET(KVM_SPLIT_NAPPED, kvm_split_mode, napped);
661
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
662

663 664
#ifdef CONFIG_PPC_BOOK3S_64
	HSTATE_FIELD(HSTATE_CFAR, cfar);
665
	HSTATE_FIELD(HSTATE_PPR, ppr);
666
	HSTATE_FIELD(HSTATE_HOST_FSCR, host_fscr);
667 668
#endif /* CONFIG_PPC_BOOK3S_64 */

669
#else /* CONFIG_PPC_BOOK3S */
670 671 672 673 674 675 676 677 678 679
	OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
	OFFSET(VCPU_XER, kvm_vcpu, arch.xer);
	OFFSET(VCPU_LR, kvm_vcpu, arch.lr);
	OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr);
	OFFSET(VCPU_PC, kvm_vcpu, arch.pc);
	OFFSET(VCPU_SPRG9, kvm_vcpu, arch.sprg9);
	OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst);
	OFFSET(VCPU_FAULT_DEAR, kvm_vcpu, arch.fault_dear);
	OFFSET(VCPU_FAULT_ESR, kvm_vcpu, arch.fault_esr);
	OFFSET(VCPU_CRIT_SAVE, kvm_vcpu, arch.crit_save);
680
#endif /* CONFIG_PPC_BOOK3S */
681
#endif /* CONFIG_KVM */
682 683

#ifdef CONFIG_KVM_GUEST
684 685 686 687 688 689 690
	OFFSET(KVM_MAGIC_SCRATCH1, kvm_vcpu_arch_shared, scratch1);
	OFFSET(KVM_MAGIC_SCRATCH2, kvm_vcpu_arch_shared, scratch2);
	OFFSET(KVM_MAGIC_SCRATCH3, kvm_vcpu_arch_shared, scratch3);
	OFFSET(KVM_MAGIC_INT, kvm_vcpu_arch_shared, int_pending);
	OFFSET(KVM_MAGIC_MSR, kvm_vcpu_arch_shared, msr);
	OFFSET(KVM_MAGIC_CRITICAL, kvm_vcpu_arch_shared, critical);
	OFFSET(KVM_MAGIC_SR, kvm_vcpu_arch_shared, sr);
691 692
#endif

693 694 695 696
#ifdef CONFIG_44x
	DEFINE(PGD_T_LOG2, PGD_T_LOG2);
	DEFINE(PTE_T_LOG2, PTE_T_LOG2);
#endif
697
#ifdef CONFIG_PPC_FSL_BOOK3E
698
	DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
699 700 701 702 703
	OFFSET(TLBCAM_MAS0, tlbcam, MAS0);
	OFFSET(TLBCAM_MAS1, tlbcam, MAS1);
	OFFSET(TLBCAM_MAS2, tlbcam, MAS2);
	OFFSET(TLBCAM_MAS3, tlbcam, MAS3);
	OFFSET(TLBCAM_MAS7, tlbcam, MAS7);
704
#endif
705

706
#if defined(CONFIG_KVM) && defined(CONFIG_SPE)
707 708 709 710
	OFFSET(VCPU_EVR, kvm_vcpu, arch.evr[0]);
	OFFSET(VCPU_ACC, kvm_vcpu, arch.acc);
	OFFSET(VCPU_SPEFSCR, kvm_vcpu, arch.spefscr);
	OFFSET(VCPU_HOST_SPEFSCR, kvm_vcpu, arch.host_spefscr);
711 712
#endif

713
#ifdef CONFIG_KVM_BOOKE_HV
714 715
	OFFSET(VCPU_HOST_MAS4, kvm_vcpu, arch.host_mas4);
	OFFSET(VCPU_HOST_MAS6, kvm_vcpu, arch.host_mas6);
716 717
#endif

718
#ifdef CONFIG_KVM_EXIT_TIMING
719 720 721 722
	OFFSET(VCPU_TIMING_EXIT_TBU, kvm_vcpu, arch.timing_exit.tv32.tbu);
	OFFSET(VCPU_TIMING_EXIT_TBL, kvm_vcpu, arch.timing_exit.tv32.tbl);
	OFFSET(VCPU_TIMING_LAST_ENTER_TBU, kvm_vcpu, arch.timing_last_enter.tv32.tbu);
	OFFSET(VCPU_TIMING_LAST_ENTER_TBL, kvm_vcpu, arch.timing_last_enter.tv32.tbl);
723 724
#endif

725
#ifdef CONFIG_PPC_POWERNV
726 727 728 729
	OFFSET(PACA_CORE_IDLE_STATE_PTR, paca_struct, core_idle_state_ptr);
	OFFSET(PACA_THREAD_IDLE_STATE, paca_struct, thread_idle_state);
	OFFSET(PACA_THREAD_MASK, paca_struct, thread_mask);
	OFFSET(PACA_SUBCORE_SIBLING_MASK, paca_struct, subcore_sibling_mask);
730 731
#endif

732 733
	DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);

734
#ifdef CONFIG_PPC_8xx
735
	DEFINE(VIRT_IMMR_BASE, (u64)__fix_to_virt(FIX_IMMR_BASE));
736 737
#endif

738 739
	return 0;
}