/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2007  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>

/* Per-CPU TLB gather state used by the generic tlb_gather/tlb_finish API. */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
/* The kernel's master page directory; cleared and installed in paging_init(). */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Indirect page copy/clear operations. Assigned in mem_init() to the
 * slow (or nommu) variants; per existing comments they may be overridden
 * later in boot if a better method is available.
 */
void (*copy_page)(void *from, void *to);
void (*clear_page)(void *to);

void show_mem(void)
{
32 33 34
	int total = 0, reserved = 0, free = 0;
	int shared = 0, cached = 0, slab = 0;
	pg_data_t *pgdat;
L
Linus Torvalds 已提交
35 36 37

	printk("Mem-info:\n");
	show_free_areas();
38 39

	for_each_online_pgdat(pgdat) {
P
Paul Mundt 已提交
40
		unsigned long flags, i;
41 42

		pgdat_resize_lock(pgdat, &flags);
P
Paul Mundt 已提交
43 44
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page = pgdat_page_nr(pgdat, i);
45 46 47 48 49 50 51 52 53 54 55
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
P
Paul Mundt 已提交
56
		}
57
		pgdat_resize_unlock(pgdat, &flags);
L
Linus Torvalds 已提交
58
	}
59 60 61 62 63 64 65 66

	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
P
Paul Mundt 已提交
67 68
	printk(KERN_INFO "Total of %ld pages in page table cache\n",
	       quicklist_total_size());
L
Linus Torvalds 已提交
69 70
}

71
#ifdef CONFIG_MMU
L
Linus Torvalds 已提交
72 73 74
/*
 * Install a single kernel PTE mapping the virtual address 'addr' to the
 * physical address 'phys' with protection 'prot', allocating the
 * intermediate pud/pmd levels as needed. Errors are reported via the
 * p??_ERROR() helpers and the mapping is abandoned.
 */
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return;
	}

	pte = pte_offset_kernel(pmd, addr);
	/* Refuse to clobber an existing mapping. */
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));

	/* Drop any stale translation for this address. */
	flush_tlb_one(get_asid(), addr);
}

/*
 * As a performance optimization, other platforms preserve the fixmap mapping
 * across a context switch, we don't presently do this, but this could be done
 * in a similar fashion as to the wired TLB interface that sh64 uses (by way
 * of the memory mapped UTLB configuration) -- this unfortunately forces us to
 * give up a TLB entry for each mapping we want to preserve. While this may be
 * viable for a small number of fixmaps, it's not particularly useful for
 * everything and needs to be carefully evaluated. (ie, we may want this for
 * the vsyscall page).
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass
 * in at __set_fixmap() time to determine the appropriate behavior to follow.
 *
 *					 -- PFM.
 */
/*
 * Establish the mapping for one fixmap slot.
 *
 * idx:  fixmap slot; must be below __end_of_fixed_addresses or we BUG().
 * phys: physical address to map at the slot's fixed virtual address.
 * prot: page protection bits for the mapping.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}
134
#endif	/* CONFIG_MMU */
L
Linus Torvalds 已提交
135 136 137 138 139 140

/*
 * paging_init() sets up the page tables
 */
/*
 * paging_init() sets up the page tables and the per-zone pfn limits.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	int nid;

	/*
	 * The kernel itself is permanently mapped through the P1 segment
	 * and never goes through the TLB, so the initial pgd can simply
	 * be cleared.
	 */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/*
	 * Seed MMU.TTB with the swapper pgd so later code never has to
	 * special-case a NULL table base.
	 */
	set_TTB(swapper_pg_dir);

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long start_pfn, low;

		start_pfn = pgdat->bdata->node_boot_start >> PAGE_SHIFT;
		low = pgdat->bdata->node_low_pfn;

		/* ZONE_NORMAL ends at the highest low pfn over all nodes. */
		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
}

P
Paul Mundt 已提交
172 173
/* /proc/kcore entries for physical RAM and the vmalloc range (see mem_init). */
static struct kcore_list kcore_mem, kcore_vmalloc;

L
Linus Torvalds 已提交
174 175
/*
 * mem_init() hands each node's bootmem pages over to the page allocator,
 * finalizes num_physpages/high_memory, installs the copy/clear_page
 * wrappers, registers /proc/kcore ranges, and reports the memory layout.
 */
void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long nr_freed = 0;
		void *node_top;

		num_physpages += pgdat->node_present_pages;

		/* Release this node's bootmem pages to the buddy allocator. */
		if (pgdat->node_spanned_pages)
			nr_freed = free_all_bootmem_node(pgdat);

		totalram_pages += nr_freed;

		/* Track the highest directly-mapped address over all nodes. */
		node_top = (void *)__va((pgdat->node_start_pfn +
					 pgdat->node_spanned_pages) <<
					 PAGE_SHIFT);
		if (node_top > high_memory)
			high_memory = node_top;
	}

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	/*
	 * Setup wrappers for copy/clear_page(), these will get overridden
	 * later in the boot process if a better method is available.
	 */
#ifdef CONFIG_MMU
	copy_page = copy_page_slow;
	clear_page = clear_page_slow;
#else
	copy_page = copy_page_nommu;
	clear_page = clear_page_nommu;
#endif

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	p3_cache_init();

	/* Initialize the vDSO */
	vsyscall_init();
}

void free_initmem(void)
{
	unsigned long addr;
242

L
Linus Torvalds 已提交
243 244 245
	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
246
		init_page_count(virt_to_page(addr));
L
Linus Torvalds 已提交
247 248 249
		free_page(addr);
		totalram_pages++;
	}
250 251 252
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
	        (unsigned long)&__init_begin) >> 10);
L
Linus Torvalds 已提交
253 254 255 256 257 258 259 260
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;
	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
261
		init_page_count(virt_to_page(p));
L
Linus Torvalds 已提交
262 263 264
		free_page(p);
		totalram_pages++;
	}
265
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
L
Linus Torvalds 已提交
266 267
}
#endif
268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Make a newly hot-added page available to the page allocator and
 * account for it in the global memory counters.
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

/*
 * Hotplug-add the memory range [start, start + size) to node 'nid'.
 *
 * start/size are in bytes; the pages are added to ZONE_NORMAL, the only
 * zone this platform populates. Returns 0 on success or the negative
 * error from __add_pages().
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(pgdat->node_zones + ZONE_NORMAL, start_pfn, nr_pages);
	if (unlikely(ret))
		/* Fix: use standard C99 __func__ (not the GCC-ism
		 * __FUNCTION__) and give the printk an error level. */
		printk(KERN_ERR "%s: Failed, __add_pages() == %d\n",
		       __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

/*
 * Memory hot-remove is not implemented on this platform; always fails
 * with -EINVAL regardless of the requested range.
 */
int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);

/*
 * Map a physical address to the node that should receive hot-added
 * memory. Only node 0 is supported at present, so every address maps
 * there.
 */
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif