#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

void show_mem(void)
{
	int total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;
	unsigned long flags;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	for_each_online_pgdat(pgdat) {
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);

	printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
	printk(KERN_INFO "%lu pages writeback\n",
					global_page_state(NR_WRITEBACK));
	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
	printk(KERN_INFO "%lu pages slab\n",
		global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE));
	printk(KERN_INFO "%lu pages pagetables\n",
					global_page_state(NR_PAGETABLE));
}

/*
 * Associate a virtual page frame with a given physical page frame 
 * and protection flags for that frame.
 */ 
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	if (pgprot_val(flags))
		set_pte_present(&init_mm, vaddr, pte, pfn_pte(pfn, flags));
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
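
/*
 * Illustrative sketch (not part of the original file): set_pte_pfn() is
 * static and is normally driven by __set_fixmap() below.  A caller maps one
 * kernel page by passing a protection such as PAGE_KERNEL, and tears the
 * mapping down again with an empty pgprot, which takes the pte_clear() path.
 */
#if 0
static void example_single_page_mapping(unsigned long vaddr, unsigned long phys)
{
	/* Map the 4 KB page at physical address 'phys' to 'vaddr'. */
	set_pte_pfn(vaddr, phys >> PAGE_SHIFT, PAGE_KERNEL);

	/* Remove the mapping again; pgprot_val() == 0 selects pte_clear(). */
	set_pte_pfn(vaddr, 0, __pgprot(0));
}
#endif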

/*
 * Associate a large virtual page frame with a given physical page frame 
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned. 
 * The pmd must already be instantiated. Assumes PAE mode.
 */ 
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));
	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
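
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * use set_pmd_pfn() to map one PMD-sized, physically contiguous region with
 * a single large-page entry.  PAGE_KERNEL_LARGE (a kernel pgprot with
 * _PAGE_PSE set) is assumed to be available in this tree.
 */
#if 0
static void example_map_large_page(unsigned long vaddr, unsigned long phys)
{
	/* Both vaddr and phys must be PMD_SIZE-aligned, as checked above. */
	set_pmd_pfn(vaddr, phys >> PAGE_SHIFT, PAGE_KERNEL_LARGE);
}
#endif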

static int fixmaps;
unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);

void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
	fixmaps++;
}
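
/*
 * Illustrative sketch (not part of the original file): __set_fixmap() is
 * normally reached through the set_fixmap()/set_fixmap_nocache() helpers in
 * <asm/fixmap.h>, which supply PAGE_KERNEL or PAGE_KERNEL_NOCACHE.
 * FIX_EXAMPLE is a hypothetical fixmap index, not one defined by the kernel.
 */
#if 0
static void example_use_fixmap(unsigned long phys)
{
	/* Cacheable kernel mapping of the page at 'phys'. */
	set_fixmap(FIX_EXAMPLE, phys);

	/* Uncached variant, e.g. for memory-mapped device registers. */
	set_fixmap_nocache(FIX_EXAMPLE, phys);

	/* The fixed virtual address that now maps it: */
	/* void *va = (void *)fix_to_virt(FIX_EXAMPLE); */
}
#endif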

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void reserve_top_address(unsigned long reserve)
{
	BUG_ON(fixmaps > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
	__VMALLOC_RESERVE += reserve;
}
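
/*
 * Illustrative sketch (not part of the original file): a paravirtualized
 * guest could call reserve_top_address() very early in boot, before any
 * fixmap entry has been set (fixmaps == 0), to keep the top of the address
 * space free for its hypervisor.  The 16 MB figure is an arbitrary example.
 */
#if 0
static void example_reserve_hypervisor_hole(void)
{
	/* Pulls __FIXADDR_TOP down and grows __VMALLOC_RESERVE by 16 MB. */
	reserve_top_address(16 * 1024 * 1024);
}
#endif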