/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
 */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};

/**
 * struct memblock_region - represents a memory region
 * @base: base address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 * @physmem: all physical memory
 */
struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
};

extern struct memblock memblock;
extern int memblock_debug;

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif

#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
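
/*
 * Example (illustrative sketch, not taken from in-tree code): early
 * architecture setup typically registers the RAM banks reported by
 * firmware and then reserves ranges that must never be handed out, such
 * as the kernel image or an initrd. The bank/initrd variables below are
 * hypothetical.
 *
 *	static void __init example_register_memory(void)
 *	{
 *		memblock_add(bank_base, bank_size);
 *		memblock_reserve(__pa_symbol(_stext), _end - _stext);
 *		if (initrd_size)
 *			memblock_reserve(initrd_start, initrd_size);
 *	}
 */
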
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
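
/*
 * Example (illustrative sketch): ranges that firmware describes as
 * hot-pluggable, mirrored or unsuitable for the linear map can be marked
 * before the kernel page tables are built; the range variables below are
 * hypothetical.
 *
 *	memblock_mark_hotplug(hotplug_base, hotplug_size);
 *	memblock_mark_nomap(fw_runtime_base, fw_runtime_size);
 */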

unsigned long memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);

/* Low level functions */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);

void __memblock_free_late(phys_addr_t base, phys_addr_t size);

/**
 * for_each_mem_range - iterate through memblock areas from type_a that are
 * not included in type_b, or through all of type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))

/**
 * for_each_mem_range_rev - iterate in reverse through memblock areas from
 * type_a that are not included in type_b, or through all of type_a if
 * type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		     __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
					  p_start, p_end, p_nid);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))
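
/*
 * Example (illustrative sketch): walking every reserved range, e.g. to
 * report what the early allocator is holding on to.
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		pr_info("reserved: [%pa-%pa]\n", &start, &end);
 */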

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long  *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
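
/*
 * Example (illustrative sketch): counting the pages present on one node by
 * walking the early pfn ranges; @nid below stands for a hypothetical node id.
 *
 *	unsigned long start_pfn, end_pfn, pages = 0;
 *	int i, this_nid;
 *
 *	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
 *		pages += end_pfn - start_pfn;
 */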

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
				  unsigned long *out_spfn,
				  unsigned long *out_epfn);
/**
 * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone is initialized. The main
 * assumption is that the zone start, end, and pgdat have been associated.
 * This way we can use the zone to determine NUMA node, and if a given part
 * of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)	\
	for (i = 0,							\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);	\
	     i != U64_MAX;					\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

/**
 * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from current position. Available as soon as memblock is
 * initialized.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
	for (; i != U64_MAX;					  \
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
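
/*
 * Example (illustrative sketch): deferred struct page initialization can
 * stop part way through a zone and later resume from the saved iterator;
 * the cursor, zone pointer and init_range() helper below are hypothetical.
 *
 *	unsigned long spfn, epfn;
 *	u64 i = saved_cursor;
 *
 *	for_each_free_mem_pfn_range_in_zone_from(i, zone, &spfn, &epfn)
 *		init_range(spfn, epfn);
 */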

int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock.  Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)
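
/*
 * Example (illustrative sketch): walking every free (memory && !reserved)
 * range on all nodes, as an early debugging or accounting helper might do.
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_debug("free: [%pa-%pa]\n", &start, &end);
 */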

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order.  Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
			       nid, flags, p_start, p_end, p_nid)

int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

#ifdef CONFIG_NEED_MULTIPLE_NODES
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_NEED_MULTIPLE_NODES */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
#define MEMBLOCK_ALLOC_KASAN		1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
				      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
				      phys_addr_t align, phys_addr_t start,
				      phys_addr_t end, int nid, bool exact_nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
					      phys_addr_t align)
{
	return memblock_phys_alloc_range(size, align, 0,
					 MEMBLOCK_ALLOC_ACCESSIBLE);
}
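
/*
 * Example (illustrative sketch): reserving a physically contiguous block
 * below 4 GiB, e.g. for a device with 32-bit DMA limits; the size and
 * alignment values are hypothetical.
 *
 *	phys_addr_t buf;
 *
 *	buf = memblock_phys_alloc_range(SZ_16M, SZ_2M, 0, SZ_4G);
 *	if (!buf)
 *		pr_warn("no memory below 4G for the example buffer\n");
 */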

void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
			     phys_addr_t min_addr, phys_addr_t max_addr,
			     int nid);

static inline void * __init memblock_alloc(phys_addr_t size,  phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_raw(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
					  MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_from(phys_addr_t size,
						phys_addr_t align,
						phys_addr_t min_addr)
{
	return memblock_alloc_try_nid(size, align, min_addr,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_low(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_node(phys_addr_t size,
						phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

static inline void __init memblock_free_early(phys_addr_t base,
					      phys_addr_t size)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_early_nid(phys_addr_t base,
						  phys_addr_t size, int nid)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	__memblock_free_late(base, size);
}
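
/*
 * Example (illustrative sketch): a boot-time table allocated with
 * memblock_alloc() comes back mapped and zeroed. If it later turns out to
 * be unneeded it can be returned with memblock_free_early(), or with
 * memblock_free_late() once the page allocator is up; table_size is
 * hypothetical.
 *
 *	void *table = memblock_alloc(table_size, SMP_CACHE_BYTES);
 *
 *	if (!table)
 *		panic("%s: failed to allocate %zu bytes\n", __func__, table_size);
 *	...
 *	memblock_free_early(__pa(table), table_size);
 */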

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up. If it is, memblock
 * will allocate memory in the bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}
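
/*
 * Example (illustrative sketch): boot code that wants early allocations to
 * stay close to the kernel image (for instance while honouring
 * "movable_node") can temporarily flip the direction:
 *
 *	memblock_set_bottom_up(true);
 *	... perform the early allocations ...
 *	memblock_set_bottom_up(false);
 */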

phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);
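
/*
 * Example (illustrative sketch): an architecture that can only address part
 * of RAM until its page tables are complete may clamp early allocations and
 * lift the limit afterwards; initial_map_end is hypothetical.
 *
 *	memblock_set_current_limit(initial_map_end);
 *	... build the full linear map ...
 *	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 */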


phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}

#define for_each_memblock(memblock_type, region)					\
	for (region = memblock.memblock_type.regions;					\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
	     region++)
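
/*
 * Example (illustrative sketch): combining for_each_memblock() with the pfn
 * accessors above to visit the usable page frames of every memory region;
 * example_process_pfns() is a hypothetical helper.
 *
 *	struct memblock_region *reg;
 *
 *	for_each_memblock(memory, reg)
 *		example_process_pfns(memblock_region_memory_base_pfn(reg),
 *				     memblock_region_memory_end_pfn(reg));
 */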

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit);

#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
					 * shift passed via *_hash_shift */
#define HASH_ZERO	0x00000004	/* Zero allocated hash table */

/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif
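
/*
 * Example (illustrative sketch): large boot-time hash tables such as the
 * dentry and inode caches are sized and placed with
 * alloc_large_system_hash(); the shift/mask variables below are
 * hypothetical.
 *
 *	table = alloc_large_system_hash("Example-cache",
 *					sizeof(struct hlist_head),
 *					0, 14, HASH_ZERO,
 *					&table_shift, &table_mask, 0, 0);
 */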

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */