#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

#ifdef CONFIG_HAVE_MEMBLOCK
/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>

#define INIT_MEMBLOCK_REGIONS	128
#define INIT_PHYSMEM_REGIONS	4

/* Definition of memblock flags. */
#define MEMBLOCK_HOTPLUG	0x1	/* hotpluggable region */

struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	unsigned long flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
};

struct memblock_type {
	unsigned long cnt;	/* number of regions */
	unsigned long max;	/* size of the allocated array */
	phys_addr_t total_size;	/* size of all regions */
	struct memblock_region *regions;
};

struct memblock {
	bool bottom_up;  /* allocate in bottom-up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
};

extern struct memblock memblock;
extern int memblock_debug;
#ifdef CONFIG_MOVABLE_NODE
/* true if the "movable_node" boot option was specified */
extern bool movable_node_enabled;
#endif /* CONFIG_MOVABLE_NODE */

#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
					    phys_addr_t start, phys_addr_t end,
					    int nid);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
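
/*
 * Illustrative sketch (not part of the kernel sources): an early-boot
 * caller might register RAM reported by firmware and then reserve a
 * range the firmware still owns.  The addresses and sizes below are
 * hypothetical.
 *
 *	memblock_add(0x80000000, 0x10000000);	(256 MiB of usable RAM)
 *	memblock_reserve(0x80000000, 0x100000);	(1 MiB firmware-owned)
 */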
void memblock_trim_memory(phys_addr_t align);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);

/* Low level functions */
int memblock_add_range(struct memblock_type *type,
		       phys_addr_t base, phys_addr_t size,
		       int nid, unsigned long flags);

int memblock_remove_range(struct memblock_type *type,
			  phys_addr_t base,
			  phys_addr_t size);

void __next_mem_range(u64 *idx, int nid, struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

/**
 * for_each_mem_range - iterate through memblock areas that are in @type_a
 * and not in @type_b; iterates over all of @type_a if @type_b is %NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid,			\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, type_a, type_b,		\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, type_a, type_b,			\
			      p_start, p_end, p_nid))
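
/*
 * Illustrative sketch (not part of this header): summing all memory that
 * is present but not reserved, which is exactly what the free-range
 * wrappers below build on.  The variable names are hypothetical.
 *
 *	phys_addr_t start, end, free = 0;
 *	u64 i;
 *
 *	for_each_mem_range(i, &memblock.memory, &memblock.reserved,
 *			   NUMA_NO_NODE, &start, &end, NULL)
 *		free += end - start;
 */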

/**
 * for_each_mem_range_rev - iterate in reverse through memblock areas that
 * are in @type_a and not in @type_b; iterates over all of @type_a if
 * @type_b is %NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid,			\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		     __next_mem_range_rev(&i, nid, type_a, type_b,	\
					 p_start, p_end, p_nid);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, type_a, type_b,		\
				  p_start, p_end, p_nid))

#ifdef CONFIG_MOVABLE_NODE
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}
#else
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return false;
}
static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long  *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
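
/*
 * Illustrative sketch (not part of this header): walking the early
 * memory map to count the pages present on node 0.  The variable names
 * are hypothetical.
 *
 *	unsigned long start_pfn, end_pfn, pages = 0;
 *	int i;
 *
 *	for_each_mem_pfn_range(i, 0, &start_pfn, &end_pfn, NULL)
 *		pages += end_pfn - start_pfn;
 */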
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock.  Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid)		\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, p_start, p_end, p_nid)
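
/*
 * Illustrative sketch (not part of this header): scanning for the first
 * free range large enough to hold an early table of @table_size bytes.
 * All names in the example are made up.
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)
 *		if (end - start >= table_size)
 *			return start;
 */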

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order.  Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid)	\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
			       nid, p_start, p_end, p_nid)
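
/*
 * Illustrative sketch (not part of this header): the reverse walk is
 * what top-down allocation relies on, so that early allocations land in
 * high memory and leave low memory free.  Names are hypothetical.
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range_reverse(i, NUMA_NO_NODE, &start, &end, NULL)
 *		pr_info("free range: %pa..%pa\n", &start, &end);
 */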

static inline void memblock_set_region_flags(struct memblock_region *r,
					     unsigned long flags)
{
	r->flags |= flags;
}

static inline void memblock_clear_region_flags(struct memblock_region *r,
					       unsigned long flags)
{
	r->flags &= ~flags;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
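
/*
 * Illustrative sketch (not part of this header): a typical early
 * allocation grabs physical memory below the current limit and converts
 * it for CPU access.  The page-table use case is hypothetical.
 *
 *	phys_addr_t pa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *	void *new_pgtable = __va(pa);
 */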

#ifdef CONFIG_MOVABLE_NODE
/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up.  If this returns true,
 * memblock allocates memory in the bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}
#else
static inline void memblock_set_bottom_up(bool enable) {}
static inline bool memblock_bottom_up(void) { return false; }
#endif
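
/*
 * Illustrative sketch (not part of this header): a caller can flip the
 * direction around a specific early allocation and then restore it.
 * The surrounding logic here is hypothetical.
 *
 *	memblock_set_bottom_up(true);
 *	pa = memblock_find_in_range_node(size, align, start, end, nid);
 *	memblock_set_bottom_up(false);
 */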

/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				  phys_addr_t max_addr);
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
int memblock_is_memory(phys_addr_t addr);
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
int memblock_is_reserved(phys_addr_t addr);
int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);


phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - Return the end_pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - Return the end_pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}
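
/*
 * Worked example of the rounding above (values hypothetical): with
 * 4 KiB pages, a reserved region [0x1800, 0x3800) spans partial pages,
 * so memblock_region_reserved_base_pfn() rounds down to pfn 1 and
 * memblock_region_reserved_end_pfn() rounds up to pfn 4, covering every
 * page the reservation touches.  The memory accessors round the other
 * way, keeping only pages that lie fully inside the region.
 */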

#define for_each_memblock(memblock_type, region)					\
	for (region = memblock.memblock_type.regions;				\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
	     region++)
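
/*
 * Illustrative sketch (not part of this header): dumping every memory
 * region together with its page-frame span, combining for_each_memblock()
 * with the pfn accessors above.
 *
 *	struct memblock_region *reg;
 *
 *	for_each_memblock(memory, reg)
 *		pr_info("region: pfn %lu..%lu\n",
 *			memblock_region_memory_base_pfn(reg),
 *			memblock_region_memory_end_pfn(reg));
 */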


#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
#else
#define __init_memblock
#define __initdata_memblock
#endif

#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return 0;
}

#endif /* CONFIG_HAVE_MEMBLOCK */

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */