/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
 */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};

/**
 * struct memblock_region - represents a memory region
 * @base: base address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 */
struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
};

extern struct memblock memblock;
extern int memblock_debug;

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif

#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);

unsigned long memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);

/* Low level functions */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);

void __memblock_free_late(phys_addr_t base, phys_addr_t size);

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
					phys_addr_t *out_start,
					phys_addr_t *out_end)
{
	extern struct memblock_type physmem;

	__next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,
			 out_start, out_end, NULL);
}

/**
 * for_each_physmem_range - iterate through physmem areas not included in type.
 * @i: u64 used as loop variable
 * @type: ptr to memblock_type which excludes from the iteration, can be %NULL
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_physmem_range(i, type, p_start, p_end)			\
	for (i = 0, __next_physmem_range(&i, type, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_physmem_range(&i, type, p_start, p_end))
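
/*
 * Illustrative sketch, not part of the original header: dumping every
 * physmem region (only available when CONFIG_HAVE_MEMBLOCK_PHYS_MAP is
 * set, e.g. on s390). A NULL @type excludes nothing:
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_physmem_range(i, NULL, &start, &end)
 *		pr_info("physmem: %pa..%pa\n", &start, &end);
 */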
#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */

/**
 * for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))
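
/*
 * Illustrative sketch, not part of the original header: walking every
 * usable memory range on all nodes (a NULL type_b excludes nothing;
 * for_each_mem_range_rev() below walks the same set in reverse):
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
 *			   MEMBLOCK_NONE, &start, &end, NULL)
 *		pr_info("memory: %pa..%pa\n", &start, &end);
 */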

/**
 * for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		     __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
					  p_start, p_end, p_nid);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}
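
/*
 * Illustrative sketch, not part of the original header: given a
 * struct memblock_region *r, the predicates above test one region at a
 * time, e.g. when deciding what to put in the linear mapping:
 *
 *	if (memblock_is_nomap(r))
 *		continue;
 */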

int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long  *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
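
/*
 * Illustrative sketch, not part of the original header:
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("node %d: pfn %lx..%lx\n", nid, start_pfn, end_pfn);
 */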

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
				  unsigned long *out_spfn,
				  unsigned long *out_epfn);
/**
 * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone are initialized. The main
 * assumption is that the zone start, end, and pgdat have been associated.
 * This way we can use the zone to determine NUMA node, and if a given part
 * of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)	\
	for (i = 0,							\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);	\
	     i != U64_MAX;					\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

/**
 * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from current position. Available as soon as memblock is
 * initialized.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
	for (; i != U64_MAX;					  \
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
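
/*
 * Illustrative sketch, not part of the original header, assuming an
 * initialized struct zone *zone; deferred struct page init walks a
 * zone's free ranges roughly like this:
 *
 *	u64 i;
 *	unsigned long spfn, epfn;
 *
 *	for_each_free_mem_pfn_range_in_zone(i, zone, &spfn, &epfn)
 *		pr_info("zone free pfn: %lx..%lx\n", spfn, epfn);
 */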

int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock.  Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)
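
/*
 * Illustrative sketch, not part of the original header: printing every
 * free range on every node:
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free: %pa..%pa\n", &start, &end);
 */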

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order.  Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
			       nid, flags, p_start, p_end, p_nid)

int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

#ifdef CONFIG_NEED_MULTIPLE_NODES
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_NEED_MULTIPLE_NODES */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
#define MEMBLOCK_ALLOC_KASAN		1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
				      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
				      phys_addr_t align, phys_addr_t start,
				      phys_addr_t end, int nid, bool exact_nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
					      phys_addr_t align)
{
	return memblock_phys_alloc_range(size, align, 0,
					 MEMBLOCK_ALLOC_ACCESSIBLE);
}
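
/*
 * Illustrative sketch, not part of the original header: allocating one
 * page-aligned page of physical memory during early boot; the return
 * value is 0 on failure:
 *
 *	phys_addr_t pa = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 *
 *	if (!pa)
 *		panic("%s: early allocation failed\n", __func__);
 */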

void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
			     phys_addr_t min_addr, phys_addr_t max_addr,
			     int nid);

static inline void * __init memblock_alloc(phys_addr_t size,  phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
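
/*
 * Illustrative sketch, not part of the original header ("nr" is a
 * placeholder count): the typical early boot allocation of a zeroed,
 * cache-aligned table. memblock_alloc() returns a virtual address or
 * NULL and zeroes the memory; the _raw variants skip the zeroing.
 *
 *	struct hlist_head *table;
 *
 *	table = memblock_alloc(nr * sizeof(*table), SMP_CACHE_BYTES);
 *	if (!table)
 *		panic("%s: failed to allocate table\n", __func__);
 */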

static inline void * __init memblock_alloc_raw(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
					  MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_from(phys_addr_t size,
						phys_addr_t align,
						phys_addr_t min_addr)
{
	return memblock_alloc_try_nid(size, align, min_addr,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_low(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_node(phys_addr_t size,
						phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

static inline void __init memblock_free_early(phys_addr_t base,
					      phys_addr_t size)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_early_nid(phys_addr_t base,
						  phys_addr_t size, int nid)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	__memblock_free_late(base, size);
}

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up or not.
 * If this returns true, memblock will allocate memory
 * in the bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}
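
/*
 * Illustrative sketch, not part of the original header: early boot code
 * (e.g. the movable_node handling) may temporarily flip the direction
 * so that allocations land near the kernel image:
 *
 *	memblock_set_bottom_up(true);
 *	...early allocations...
 *	memblock_set_bottom_up(false);
 */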

phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);


phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}

#define for_each_memblock(memblock_type, region)					\
	for (region = memblock.memblock_type.regions;					\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
	     region++)
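
/*
 * Illustrative sketch, not part of the original header: combining
 * for_each_memblock() with the pfn accessors above to print the page
 * frame span of every usable region:
 *
 *	struct memblock_region *reg;
 *
 *	for_each_memblock(memory, reg)
 *		pr_info("pfn %lx..%lx\n",
 *			memblock_region_memory_base_pfn(reg),
 *			memblock_region_memory_end_pfn(reg));
 */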

extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit);

#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
					 * shift passed via *_hash_shift */
#define HASH_ZERO	0x00000004	/* Zero allocated hash table */
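
/*
 * Illustrative sketch, not part of the original header; "Example-hash"
 * and the variables are placeholders. Passing 0 entries lets the
 * function size the table from the amount of system memory:
 *
 *	unsigned int hash_shift;
 *	unsigned long hash_mask;
 *	void *table;
 *
 *	table = alloc_large_system_hash("Example-hash",
 *					sizeof(struct hlist_head),
 *					0, 14, HASH_EARLY | HASH_ZERO,
 *					&hash_shift, &hash_mask, 0, 0);
 */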

/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */