head_64.S
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * TILE startup code.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/thread_info.h>
#include <asm/processor.h>
#include <asm/asm-offsets.h>
#include <hv/hypervisor.h>
#include <arch/chip.h>
#include <arch/spr_def.h>

/* Extract two 32-bit values that were read into one register. */
#ifdef __BIG_ENDIAN__
#define GET_FIRST_INT(rd, rs) shrsi rd, rs, 32
#define GET_SECOND_INT(rd, rs) addxi rd, rs, 0
#else
#define GET_FIRST_INT(rd, rs) addxi rd, rs, 0
#define GET_SECOND_INT(rd, rs) shrsi rd, rs, 32
#endif
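
/*
 * Roughly, in C (a sketch; v is the full 64-bit register value):
 *   GET_FIRST_INT:  rd = (int)v;          (sign-extends the low word)
 *   GET_SECOND_INT: rd = (int)(v >> 32);  (sign-extends the high word)
 * with the two swapped on big-endian, where the first 32-bit value
 * in memory lands in the high half of the register.
 */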

/*
 * This module contains the entry code for kernel images. It performs the
 * minimal setup needed to call the generic C routines.
 */
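
/*
 * A note on syntax: curly braces group instructions into one TILE-Gx
 * VLIW bundle; instructions in a bundle issue together, and all of
 * them read their source registers before any results are written.
 */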

	__HEAD
ENTRY(_start)
	/* Notify the hypervisor of what version of the API we want */
	{
#if KERNEL_PL == 1 && _HV_VERSION == 13
	  /* Support older hypervisors by asking for API version 12. */
	  movei r0, _HV_VERSION_OLD_HV_INIT
#else
	  movei r0, _HV_VERSION
#endif
	  movei r1, TILE_CHIP
	}
	{
	  movei r2, TILE_CHIP_REV
	  movei r3, KERNEL_PL
	}
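	/*
	 * r0..r3 now hold hv_init()'s arguments: the requested API
	 * version, chip, chip revision, and client protection level.
	 */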
	jal _hv_init
	/* Get a reasonable default ASID in r0 */
	{
	  move r0, zero
	  jal _hv_inquire_asid
	}

	/*
	 * Install the default page table.  The relocation required to
	 * statically define the table is a bit too complex, so we have
	 * to plug in the pointer from the L0 to the L1 table by hand.
	 * We only do this on the first cpu to boot, though, since the
	 * other CPUs should see a properly-constructed page table.
	 */
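	/*
	 * In rough C terms, the bundles below do (a sketch only):
	 *   pte = swapper_pgprot;                       (the access PTE)
	 *   pte.ptfn = CPA(temp_data_pmd) >> HV_LOG2_PAGE_TABLE_ALIGN;
	 *   store pte into the .Lsv_data_pmd slot;
	 *   pte.ptfn = CPA(temp_code_pmd) >> HV_LOG2_PAGE_TABLE_ALIGN;
	 *   store pte into the .Lsv_code_pmd slot;
	 * skipping the stores if swapper_pgprot already has GLOBAL set,
	 * i.e. the tables were already initialized.
	 */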
	{
	  GET_FIRST_INT(r2, r0)    /* ASID for hv_install_context */
	  moveli r4, hw1_last(swapper_pgprot - PAGE_OFFSET)
	}
	{
	  shl16insli r4, r4, hw0(swapper_pgprot - PAGE_OFFSET)
	}
	{
	  ld r1, r4               /* access_pte for hv_install_context */
	}
	{
	  moveli r0, hw1_last(.Lsv_data_pmd - PAGE_OFFSET)
	  moveli r6, hw1_last(temp_data_pmd - PAGE_OFFSET)
	}
	{
	  /* After initializing swapper_pgprot, HV_PTE_GLOBAL is set. */
	  bfextu r7, r1, HV_PTE_INDEX_GLOBAL, HV_PTE_INDEX_GLOBAL
	  finv r4
	}
	bnez r7, .Lno_write
	{
	  shl16insli r0, r0, hw0(.Lsv_data_pmd - PAGE_OFFSET)
	  shl16insli r6, r6, hw0(temp_data_pmd - PAGE_OFFSET)
	}
	{
	  /* Cut off the low bits of the PT address. */
	  shrui r6, r6, HV_LOG2_PAGE_TABLE_ALIGN
	  /* Start with our access pte. */
	  move r5, r1
	}
	{
	  /* Stuff the address into the page table pointer slot of the PTE. */
	  bfins r5, r6, HV_PTE_INDEX_PTFN, \
			HV_PTE_INDEX_PTFN + HV_PTE_PTFN_BITS - 1
	}
	{
	  /* Store the L0 data PTE. */
	  st r0, r5
	  addli r6, r6, (temp_code_pmd - temp_data_pmd) >> \
			HV_LOG2_PAGE_TABLE_ALIGN
	}
	{
	  addli r0, r0, .Lsv_code_pmd - .Lsv_data_pmd
	  bfins r5, r6, HV_PTE_INDEX_PTFN, \
			HV_PTE_INDEX_PTFN + HV_PTE_PTFN_BITS - 1
	}
	/* Store the L0 code PTE. */
	st r0, r5

.Lno_write:
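	/*
	 * Build the virtual (link-time) address of label 1f in lr by
	 * hand (moveli hw2_last, then shl16insli hw1 and hw0, sixteen
	 * bits at a time), then tail-jump to the hypervisor with j.  A
	 * plain jal would set lr to a PC in the current boot-time
	 * mapping, but once the new page table is live we must "return"
	 * to 1f at its linked virtual address.
	 */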
	moveli lr, hw2_last(1f)
	{
	  shl16insli lr, lr, hw1(1f)
	  moveli r0, hw1_last(swapper_pg_dir - PAGE_OFFSET)
	}
	{
	  shl16insli lr, lr, hw0(1f)
	  shl16insli r0, r0, hw0(swapper_pg_dir - PAGE_OFFSET)
	}
	{
	  moveli r3, CTX_PAGE_FLAG
	  j _hv_install_context
	}
1:

	/* Install the interrupt base. */
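	/*
	 * intrpt_start is the base of the interrupt vector table (laid
	 * out with the interrupt vectors in intvec_64.S); the _K suffix
	 * names the SPR for the kernel's protection level.
	 */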
	moveli r0, hw2_last(intrpt_start)
	shl16insli r0, r0, hw1(intrpt_start)
	shl16insli r0, r0, hw0(intrpt_start)
	mtspr SPR_INTERRUPT_VECTOR_BASE_K, r0

	/* Get our processor number and save it away in SAVE_K_0. */
	jal _hv_inquire_topology
	{
	  GET_FIRST_INT(r5, r1)   /* r5 = width */
	  GET_SECOND_INT(r4, r0)  /* r4 = y */
	}
	{
	  GET_FIRST_INT(r6, r0)   /* r6 = x */
	  mul_lu_lu r4, r4, r5
	}
	{
	  add r4, r4, r6          /* r4 == cpu == y*width + x */
	}
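	/*
	 * For example, on an 8x8 grid the tile at (x=3, y=2) gets
	 * cpu = 2*8 + 3 = 19.
	 */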

#ifdef CONFIG_SMP
	/*
	 * Load up our per-cpu offset.  When the first (master) tile
	 * boots, this value is still zero, so we will load boot_pc
	 * with start_kernel, and boot_sp at the top of init_stack.
	 * The master tile initializes the per-cpu offset array, so that
	 * when subsequent (secondary) tiles boot, they will instead load
	 * from their per-cpu versions of boot_sp and boot_pc.
	 */
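	/*
	 * This is r5 = __per_cpu_offset[r4]: entries are 8 bytes, and
	 * shl3add computes the address as (cpu << 3) + &__per_cpu_offset.
	 */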
	moveli r5, hw2_last(__per_cpu_offset)
	shl16insli r5, r5, hw1(__per_cpu_offset)
	shl16insli r5, r5, hw0(__per_cpu_offset)
	shl3add r5, r4, r5
	ld r5, r5
	bnez r5, 1f

	/*
	 * Save the width and height to the smp_topology variable
	 * for later use.
	 */
	moveli r0, hw2_last(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
	shl16insli r0, r0, hw1(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
	shl16insli r0, r0, hw0(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
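	/*
	 * r1 still holds the packed (width, height) pair returned by
	 * _hv_inquire_topology, so one 64-bit store fills in both the
	 * width and height fields of smp_topology.
	 */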
	st r0, r1
1:
#else
	move r5, zero
#endif

	/* Load and go with the correct pc and sp. */
	{
	  moveli r1, hw2_last(boot_sp)
	  moveli r0, hw2_last(boot_pc)
	}
	{
	  shl16insli r1, r1, hw1(boot_sp)
	  shl16insli r0, r0, hw1(boot_pc)
	}
	{
	  shl16insli r1, r1, hw0(boot_sp)
	  shl16insli r0, r0, hw0(boot_pc)
	}
	{
	  add r1, r1, r5
	  add r0, r0, r5
	}
	ld r0, r0
	ld sp, r1
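	/*
	 * Pack the cpu number into the top bits of the kernel stack
	 * pointer: bits CPU_SHIFT and up hold the cpu, the bits below
	 * hold sp.  The interrupt entry code later recovers both from
	 * this one SPR.
	 */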
	shli r4, r4, CPU_SHIFT
	bfins r4, sp, 0, CPU_SHIFT-1
	mtspr SPR_SYSTEM_SAVE_K_0, r4  /* save ksp0 + cpu */
	{
	  move lr, zero   /* stop backtraces in the called function */
	  jr r0
	}
	ENDPROC(_start)

__PAGE_ALIGNED_BSS
	.align PAGE_SIZE
ENTRY(empty_zero_page)
	.fill PAGE_SIZE,1,0
	END(empty_zero_page)

	.macro PTE cpa, bits1
	.quad HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED |\
	      HV_PTE_GLOBAL | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) |\
	      (\bits1) | (HV_CPA_TO_PTFN(\cpa) << HV_PTE_INDEX_PTFN)
	.endm
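	/*
	 * For example, "PTE 0, HV_PTE_READABLE | HV_PTE_WRITABLE" emits
	 * one 64-bit present, global, read-write PTE mapping CPA 0 with
	 * the NO_L3 cache mode.
	 */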

__PAGE_ALIGNED_DATA
	.align PAGE_SIZE
ENTRY(swapper_pg_dir)
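	/*
	 * The directory is laid out sparsely with .org: only two slots
	 * are populated, and both start as .quad 0 placeholders that the
	 * _start code above fills in on the first cpu to boot.
	 */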
	.org swapper_pg_dir + PGD_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
.Lsv_data_pmd:
	.quad 0  /* PTE temp_data_pmd - PAGE_OFFSET, 0 */
	.org swapper_pg_dir + PGD_INDEX(MEM_SV_START) * HV_PTE_SIZE
.Lsv_code_pmd:
	.quad 0  /* PTE temp_code_pmd - PAGE_OFFSET, 0 */
	.org swapper_pg_dir + SIZEOF_PGD
	END(swapper_pg_dir)

	.align HV_PAGE_TABLE_ALIGN
ENTRY(temp_data_pmd)
	/*
	 * We fill the PAGE_OFFSET pmd with huge pages with
	 * VA = PA + PAGE_OFFSET.  We remap things with more precise access
	 * permissions later.
	 */
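	/*
	 * The .rept below emits PTRS_PER_PMD PTEs, one per huge page,
	 * covering physical addresses from 0 up to
	 * PTRS_PER_PMD * HPAGE_SIZE.
	 */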
	.set addr, 0
	.rept PTRS_PER_PMD
	PTE addr, HV_PTE_READABLE | HV_PTE_WRITABLE
	.set addr, addr + HPAGE_SIZE
	.endr
	.org temp_data_pmd + SIZEOF_PMD
	END(temp_data_pmd)

	.align HV_PAGE_TABLE_ALIGN
ENTRY(temp_code_pmd)
	/*
	 * We fill the MEM_SV_START pmd with huge pages with
	 * VA = PA + PAGE_OFFSET.  We remap things with more precise access
	 * permissions later.
	 */
	.set addr, 0
	.rept PTRS_PER_PMD
	PTE addr, HV_PTE_READABLE | HV_PTE_EXECUTABLE
	.set addr, addr + HPAGE_SIZE
	.endr
	.org temp_code_pmd + SIZEOF_PMD
	END(temp_code_pmd)

	/*
	 * Isolate swapper_pgprot to its own cache line, since each cpu
	 * starting up will read it using VA-is-PA and local homing.
	 * This would otherwise likely conflict with other data on the cache
	 * line, once we have set its permanent home in the page tables.
	 */
	__INITDATA
	.align CHIP_L2_LINE_SIZE()
ENTRY(swapper_pgprot)
	.quad HV_PTE_PRESENT | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
	.align CHIP_L2_LINE_SIZE()
	END(swapper_pgprot)