#include <linux/highmem.h>
#include <linux/module.h>

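/*
 * kmap() may sleep (see might_sleep()), so it must not be used in
 * atomic context.  Low-memory pages are permanently mapped; only
 * highmem pages go through the persistent kmap pool.
 */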
void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

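/*
 * kunmap() must not be called from interrupt context; only highmem
 * pages have a kmap pool entry to release.
 */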
void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

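/*
 * Sanity-check that the km_type passed by the caller matches the
 * context it is used from (hardirq, softirq or process) and that
 * interrupts are disabled for the IRQ slots.  Warnings are limited
 * to ten in total to avoid log spam.
 */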
static void debug_kmap_atomic_prot(enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	static unsigned warn_count = 10;

	if (unlikely(warn_count == 0))
		return;

	if (unlikely(in_interrupt())) {
		if (in_irq()) {
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		} else if (!irqs_disabled()) {	/* softirq */
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
			    type != KM_SKB_SUNRPC_DATA &&
			    type != KM_SKB_DATA_SOFTIRQ &&
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		}
	}

	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
			type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) {
		if (!irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
		if (irq_count() == 0 && !irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	}
#endif
}

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
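/*
 * A minimal usage sketch (hypothetical caller; 'buf' and 'len' are
 * assumed locals): map, touch the page, then unmap on the same CPU,
 * without sleeping in between:
 *
 *	char *vaddr = kmap_atomic(page, KM_USER0);
 *	memcpy(vaddr, buf, len);
 *	kunmap_atomic(vaddr, KM_USER0);
 */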
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic_prot(type);

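	/*
	 * Each CPU owns a window of KM_TYPE_NR fixmap slots; 'type'
	 * selects the slot within this CPU's window.
	 */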
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
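	/* The target slot must be vacant: catch reuse of a live km_type. */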
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

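/* Map with the default kernel protection, kmap_prot. */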
void *kmap_atomic(struct page *page, enum km_type type)
{
	return kmap_atomic_prot(page, type, kmap_prot);
}

void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they'll try to access this pte
	 * without first remapping it.  Keeping stale mappings around is a
	 * bad idea also, in case the page changes cacheability attributes
	 * or becomes a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);
	else {
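		/*
		 * A low-memory address was never remapped through the
		 * fixmap, so there is nothing to clear; debug builds
		 * just sanity-check that it is a valid lowmem address.
		 */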
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

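/*
 * Translate an address returned by kmap_atomic() back to its struct page.
 * Low-memory addresses convert directly; fixmap addresses are resolved
 * through the kmap pte array.
 */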
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);