/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2007  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>

/* Per-CPU state used by the generic mmu_gather TLB teardown machinery. */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
/* The kernel's master page directory, covering the kernel address space. */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

#ifdef CONFIG_SUPERH32
/*
 * Handle trivial transitions between cached and uncached
 * segments, making use of the 1:1 mapping relationship in
 * 512MB lowmem.
 *
 * This is the offset of the uncached section from its cached alias:
 * adding it to a cached (P1) address yields the uncached (P2) alias.
 * Default value only valid in 29 bit mode, in 32bit mode will be
 * overridden in pmb_init.
 */
unsigned long cached_to_uncached = P2SEG - P1SEG;
#endif
#ifdef CONFIG_MMU
L
Linus Torvalds 已提交
41 42 43
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
44
	pud_t *pud;
L
Linus Torvalds 已提交
45 46 47
	pmd_t *pmd;
	pte_t *pte;

S
Stuart Menefy 已提交
48
	pgd = pgd_offset_k(addr);
L
Linus Torvalds 已提交
49 50 51 52 53
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

S
Stuart Menefy 已提交
54 55 56 57
	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return;
58 59
	}

S
Stuart Menefy 已提交
60 61 62 63
	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return;
L
Linus Torvalds 已提交
64 65 66 67 68 69 70 71 72
	}

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
73
	local_flush_tlb_one(get_asid(), addr);
L
Linus Torvalds 已提交
74 75 76 77 78 79
}

/*
 * As a performance optimization, other platforms preserve the fixmap mapping
 * across a context switch, we don't presently do this, but this could be done
 * in a similar fashion as to the wired TLB interface that sh64 uses (by way
 * of the memory mapped UTLB configuration) -- this unfortunately forces us to
 * give up a TLB entry for each mapping we want to preserve. While this may be
 * viable for a small number of fixmaps, it's not particularly useful for
 * everything and needs to be carefully evaluated. (ie, we may want this for
 * the vsyscall page).
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass
 * in at __set_fixmap() time to determine the appropriate behavior to follow.
 *
 *					 -- PFM.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	/* Reject anything outside the fixmap index range. */
	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

/*
 * Pre-allocate pte pages covering [start, end) under @pgd_base so that
 * fixmap entries can later be installed without allocating.  The ptes
 * themselves are left empty; __set_fixmap() fills them in later.
 *
 * The pud and pmd levels are folded on this architecture, hence the
 * direct (pud_t *)/(pmd_t *) casts when descending the levels.
 */
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					/* Back this pmd with a zeroed bootmem page. */
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					pmd_populate_kernel(&init_mm, pmd, pte);
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			/* Inner indices restart at 0 for each outer entry. */
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

/*
 * paging_init() sets up the page tables
 *
 * Clears swapper_pg_dir, points the MMU at it, pre-allocates the page
 * tables backing the fixmap range, then sizes ZONE_NORMAL across all
 * online nodes before handing off to the core zone initialization.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;
	int nid;

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	/*
	 * Everything lives in ZONE_NORMAL here; record the highest low
	 * pfn seen on any node so free_area_init_nodes() can size it.
	 */
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);

	/* Set up the uncached fixmap */
	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
}

/*
 * Release all bootmem-managed pages to the buddy allocator, set up the
 * zero page, report the memory layout, and initialize the vDSO.
 */
void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	num_physpages = 0;
	high_memory = NULL;

	/*
	 * Hand each node's bootmem pages to the page allocator and
	 * track the highest directly-mapped virtual address seen.
	 */
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	/* Section sizes come from linker-provided symbols. */
	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	/* Initialize the vDSO */
	vsyscall_init();
}

void free_initmem(void)
{
	unsigned long addr;
242

L
Linus Torvalds 已提交
243 244 245
	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
246
		init_page_count(virt_to_page(addr));
L
Linus Torvalds 已提交
247 248 249
		free_page(addr);
		totalram_pages++;
	}
250 251 252
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
	        (unsigned long)&__init_begin) >> 10);
L
Linus Torvalds 已提交
253 254 255 256 257 258 259 260
}

#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Return the pages that held the initrd image to the page allocator
 * once the image is no longer needed.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		struct page *page = virt_to_page(p);

		ClearPageReserved(page);
		init_page_count(page);
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif
#if THREAD_SHIFT < PAGE_SHIFT
/*
 * When the thread stack is smaller than a page, thread_info structures
 * are carved out of a dedicated slab cache instead of whole pages.
 */
static struct kmem_cache *thread_info_cache;

/* Allocate a thread_info for @tsk from the slab cache; NULL on failure. */
struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	struct thread_info *ti;

	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
	if (unlikely(ti == NULL))
		return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
	/* Zero the whole area so stack-usage accounting starts clean. */
	memset(ti, 0, THREAD_SIZE);
#endif
	return ti;
}

/* Release a thread_info previously returned by alloc_thread_info(). */
void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}

/* Create the thread_info slab cache; called once during boot. */
void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Hot-add @size bytes of memory at physical address @start to node @nid.
 *
 * Returns 0 on success, or the error code propagated from __add_pages().
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	pg_data_t *pgdat = NODE_DATA(nid);
	int ret;

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
				start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
/*
 * Map a hot-added physical address to its home node.  This platform is
 * treated as single-node for hotplug purposes.
 */
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */