contig.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */
#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>

#include <asm/meminit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mca.h>

#ifdef CONFIG_VIRTUAL_MEM_MAP
static unsigned long max_gap;
#endif

/* physical address where the bootmem map is located */
unsigned long bootmap_start;

/**
 * find_bootmap_location - callback to find a memory area for the bootmap
 * @start: start of region
 * @end: end of region
 * @arg: pointer to the number of bytes needed for the bootmap
 *
 * Find a place to put the bootmap and return its starting address in
 * bootmap_start.  This address must be page-aligned.
 */
static int __init
find_bootmap_location (u64 start, u64 end, void *arg)
{
	u64 needed = *(unsigned long *)arg;
	u64 range_start, range_end, free_start;
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif

	free_start = PAGE_OFFSET;

	for (i = 0; i < num_rsvd_regions; i++) {
		range_start = max(start, free_start);
		range_end   = min(end, rsvd_region[i].start & PAGE_MASK);

		free_start = PAGE_ALIGN(rsvd_region[i].end);

		if (range_end <= range_start)
			continue; /* skip over empty range */

		if (range_end - range_start >= needed) {
			bootmap_start = __pa(range_start);
			return -1;	/* done */
		}

		/* nothing more available in this segment */
		if (range_end == end)
			return 0;
	}
	return 0;
}
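
/*
 * Illustrative only: find_bootmap_location() is not called directly.
 * find_memory() below hands it to efi_memmap_walk(), which invokes it
 * once per usable memory descriptor, roughly:
 *
 *	bootmap_start = ~0UL;
 *	efi_memmap_walk(find_bootmap_location, &bootmap_size);
 *
 * The walk stops early once the callback returns a negative value,
 * i.e. as soon as a page-aligned range of at least *arg bytes has
 * been recorded in bootmap_start.
 */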

#ifdef CONFIG_SMP
static void *cpu_data;
/**
 * per_cpu_init - set up per-cpu variables
 *
 * Allocate and set up per-cpu data areas.
 */
void *per_cpu_init(void)
{
	static bool first_time = true;
	void *cpu0_data = __cpu0_per_cpu;
	unsigned int cpu;

	if (!first_time)
		goto skip;
	first_time = false;

	/*
	 * get_free_pages() cannot be used before cpu_init() is done.
	 * The BSP allocates PERCPU_PAGE_SIZE bytes for all possible
	 * CPUs up front so that the APs never need to call
	 * get_zeroed_page().
	 */
	for_each_possible_cpu(cpu) {
		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;

		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];

		/*
		 * The percpu area for cpu0 is moved from the __init area,
		 * which is set up by head.S and used until this point.
		 * Update ar.k3.  This move ensures that the percpu area
		 * for cpu0 is on the correct node and that its virtual
		 * address isn't insanely far from the other percpu areas,
		 * which is important for the congruent percpu allocator.
		 */
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
				    (unsigned long)__per_cpu_start);

		cpu_data += PERCPU_PAGE_SIZE;
	}
skip:
	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
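
/*
 * Sketch of how the offsets initialized above are consumed (assuming
 * the generic percpu accessors of this era): a per-cpu access is just
 * pointer arithmetic on __per_cpu_offset, roughly
 *
 *	per_cpu(var, cpu) ~= *(&var + __per_cpu_offset[cpu])
 *
 * so each CPU ends up reading its own PERCPU_PAGE_SIZE copy of the
 * static percpu data populated by the memcpy() above.
 */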

static inline void
alloc_per_cpu_data(void)
{
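	/*
	 * __alloc_bootmem(size, align, goal): reserve one
	 * PERCPU_PAGE_SIZE slot per possible CPU, aligned to
	 * PERCPU_PAGE_SIZE, preferring addresses above MAX_DMA_ADDRESS
	 * so DMA-capable low memory is left untouched.
	 */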
	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}

/**
 * setup_per_cpu_areas - set up percpu areas
 *
 * Arch code has already allocated and initialized percpu areas.  All
 * this function has to do is to teach the determined layout to the
 * dynamic percpu allocator, which happens to be more complex than
 * creating whole new ones using helpers.
 */
void __init
setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *gi;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;
	int rc;

	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	gi = &ai->groups[0];

	/* units are assigned consecutively to possible cpus */
	for_each_possible_cpu(cpu)
		gi->cpu_map[gi->nr_units++] = cpu;

	/* set parameters */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);

	ai->static_size		= static_size;
	ai->reserved_size	= reserved_size;
	ai->dyn_size		= dyn_size;
	ai->unit_size		= PERCPU_PAGE_SIZE;
	ai->atom_size		= PAGE_SIZE;
	ai->alloc_size		= PERCPU_PAGE_SIZE;

	rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
	if (rc)
		panic("failed to setup percpu area (err=%d)", rc);

	pcpu_free_alloc_info(ai);
}
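
/*
 * Unit layout sketch: each unit handed to the first-chunk code above is
 * a single PERCPU_PAGE_SIZE page, carved up as
 *
 *	| static_size | reserved_size | dyn_size |
 *
 * where static_size holds the copied .data.percpu image, reserved_size
 * is PERCPU_MODULE_RESERVE for module static percpu variables, and
 * dyn_size feeds first-chunk dynamic allocations; a negative dyn_size
 * therefore means the static data plus the module reserve no longer
 * fit in one percpu page.
 */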
#else
#define alloc_per_cpu_data() do { } while (0)
#endif /* CONFIG_SMP */

/**
 * find_memory - set up the memory map
 *
 * Walk the EFI memory map and find usable memory for the system, taking
 * into account reserved areas.
 */
void __init
find_memory (void)
{
	unsigned long bootmap_size;

	reserve_memory();

	/* first find highest page frame number */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;
	efi_memmap_walk(find_max_min_low_pfn, NULL);
	max_pfn = max_low_pfn;
	/* how many bytes to cover all the pages */
	bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;

	/* look for a location to hold the bootmap */
	bootmap_start = ~0UL;
	efi_memmap_walk(find_bootmap_location, &bootmap_size);
	if (bootmap_start == ~0UL)
		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);

	bootmap_size = init_bootmem_node(NODE_DATA(0),
			(bootmap_start >> PAGE_SHIFT), 0, max_pfn);

	/* Free all available memory, then mark bootmem-map as being in use. */
	efi_memmap_walk(filter_rsvd_memory, free_bootmem);
	reserve_bootmem(bootmap_start, bootmap_size, BOOTMEM_DEFAULT);

	find_initrd();

	alloc_per_cpu_data();
}
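
/*
 * Rough arithmetic (illustrative): the bootmem map is a bitmap with one
 * bit per page frame, so covering max_pfn frames costs about
 *
 *	bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT
 *		~= PAGE_ALIGN(max_pfn / 8) bytes
 *
 * e.g. 4GB of 16KB pages is 256K frames, i.e. a 32KB bitmap.
 */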

/*
 * Set up the page tables.
 */

void __init
paging_init (void)
{
	unsigned long max_dma;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA32
	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_DMA32] = max_dma;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	efi_memmap_walk(filter_memory, register_active_ranges);
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
	if (max_gap < LARGE_GAP) {
		vmem_map = (struct page *) 0;
	} else {
		unsigned long map_size;

		/* allocate virtual_mem_map */

		map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
			sizeof(struct page));
		VMALLOC_END -= map_size;
		vmem_map = (struct page *) VMALLOC_END;
		efi_memmap_walk(create_mem_map_page_table, NULL);

		/*
		 * alloc_node_mem_map makes an adjustment for mem_map
		 * which isn't compatible with vmem_map.
		 */
		NODE_DATA(0)->node_mem_map = vmem_map +
			find_min_pfn_with_active_regions();

		printk("Virtual mem_map starts at 0x%p\n", mem_map);
	}
#else /* !CONFIG_VIRTUAL_MEM_MAP */
	memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
#endif /* !CONFIG_VIRTUAL_MEM_MAP */
	free_area_init_nodes(max_zone_pfns);
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
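
/*
 * Why the vmem_map trick works (sketch, assuming ia64's
 * CONFIG_VIRTUAL_MEM_MAP definitions): with the virtually mapped
 * mem_map installed above, pfn_to_page() reduces to plain array
 * indexing,
 *
 *	pfn_to_page(pfn) ~= vmem_map + pfn
 *
 * while create_mem_map_page_table() only populates page tables for
 * ranges that actually contain memory, so large holes between memory
 * segments cost no struct page storage at all.
 */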