/*
 * Copyright © 2008 Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 */

#include <asm/iomap.h>
#include <asm/pat.h>
#include <linux/module.h>

#ifdef CONFIG_X86_PAE
24
static int
25 26 27 28 29
is_io_mapping_possible(resource_size_t base, unsigned long size)
{
	return 1;
}
#else
30
static int
31 32 33 34 35 36 37 38 39 40
is_io_mapping_possible(resource_size_t base, unsigned long size)
{
	/* There is no way to map greater than 1 << 32 address without PAE */
	if (base + size > 0x100000000ULL)
		return 0;

	return 1;
}
#endif

/*
 * Reserve a write-combining memtype for [base, base + size) and return the
 * page protection to map it with in *prot.
 *
 * Returns 0 on success (with *prot filled in), -EINVAL if the range is not
 * mappable on this configuration or the memtype reservation fails.
 *
 * Without PAT, WC cannot be expressed via page attributes, so fall back to
 * an uncached mapping and skip the reservation entirely (free_io_memtype()
 * mirrors this by skipping the free when PAT is disabled).
 */
int
reserve_io_memtype_wc(u64 base, unsigned long size, pgprot_t *prot)
{
	unsigned long ret_flag;

	if (!is_io_mapping_possible(base, size))
		goto out_err;

	if (!pat_enabled) {
		*prot = pgprot_noncached(PAGE_KERNEL);
		return 0;
	}

	if (reserve_memtype(base, base + size, _PAGE_CACHE_WC, &ret_flag))
		goto out_err;

	/*
	 * reserve_memtype() may hand back a different (compatible) type in
	 * ret_flag than the WC we asked for.  WB would mean a cached mapping
	 * of I/O memory, which we refuse.
	 */
	if (ret_flag == _PAGE_CACHE_WB)
		goto out_free;

	/* Keep any existing kernel identity mapping in sync with ret_flag. */
	if (kernel_map_sync_memtype(base, size, ret_flag))
		goto out_free;

	*prot = __pgprot(__PAGE_KERNEL | ret_flag);
	return 0;

out_free:
	free_memtype(base, base + size);
out_err:
	return -EINVAL;
}

/*
 * Release the memtype reservation taken by reserve_io_memtype_wc().
 * When PAT is disabled no reservation was made, so there is nothing
 * to free.
 */
void
free_io_memtype(u64 base, unsigned long size)
{
	if (!pat_enabled)
		return;

	free_memtype(base, base + size);
}

/* Map 'pfn' using fixed map 'type' and protections 'prot'
 *
 * Returns the virtual address of the per-CPU fixmap slot now backed by
 * 'pfn'.  The caller runs in atomic context from here until the matching
 * iounmap_atomic() call re-enables pagefaults.
 */
void *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* Enter atomic context; iounmap_atomic() undoes this. */
	pagefault_disable();

	/*
	 * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
	 * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the
	 * MTRR is UC or WC.  UC_MINUS gets the real intention, of the
	 * user, which is "WC if the MTRR is WC, UC if you can't do that."
	 */
	if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
		prot = PAGE_KERNEL_UC_MINUS;

	/* Pick this CPU's fixmap slot for 'type' and install the pte there. */
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, prot));
	arch_flush_lazy_mmu_mode();

	return (void*) vaddr;
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);

/*
 * Tear down a mapping created by iomap_atomic_prot_pfn() and leave atomic
 * context.  'kvaddr' is the address that call returned; 'type' must match
 * the km_type used to create the mapping (it selects the per-CPU fixmap
 * slot to clear).
 */
void
iounmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they'll try to access this pte
	 * without first remap it.  Keeping stale mappings around is a bad idea
	 * also, in case the page changes cacheability attributes or becomes
	 * a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}
EXPORT_SYMBOL_GPL(iounmap_atomic);