/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
 */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};

/**
 * struct memblock_region - represents a memory region
 * @base: physical address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 * @physmem: all physical memory
 */
struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
};

extern struct memblock memblock;
extern int memblock_debug;

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif

#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);

unsigned long memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);

/* Low level functions */
int memblock_add_range(struct memblock_type *type,
		       phys_addr_t base, phys_addr_t size,
		       int nid, enum memblock_flags flags);

void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);

void __memblock_free_late(phys_addr_t base, phys_addr_t size);

/**
 * for_each_mem_range - iterate through memblock areas from type_a that are
 * not included in type_b. Or just type_a if type_b is %NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))
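
/*
 * Illustrative usage sketch (not part of this header's API; variable names
 * are example-only): walk every region of memblock.memory regardless of
 * reservations by passing a NULL type_b.
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
 *			   MEMBLOCK_NONE, &start, &end, NULL)
 *		pr_info("memory range: [%pa-%pa]\n", &start, &end);
 */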

/**
 * for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a that are not included in type_b. Or just type_a if type_b is %NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		     __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
					  p_start, p_end, p_nid);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))
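
/*
 * Illustrative sketch (example-only variables): dump all reserved ranges
 * early in boot, e.g. while debugging the bootmem layout.
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		pr_info("reserved: [%pa-%pa]\n", &start, &end);
 */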

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long  *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
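
/*
 * Illustrative sketch (example-only variables): walk the early memory map
 * as pfn ranges, per node. Only available with CONFIG_HAVE_MEMBLOCK_NODE_MAP.
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("node %d: pfns %lx-%lx\n", nid, start_pfn, end_pfn);
 */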
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
				  unsigned long *out_spfn,
				  unsigned long *out_epfn);
/**
 * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone are initialized. The main
 * assumption is that the zone start, end, and pgdat have been associated.
 * This way we can use the zone to determine NUMA node, and if a given part
 * of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)	\
	for (i = 0,							\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);	\
	     i != U64_MAX;					\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
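
/*
 * Illustrative sketch (example-only variables, assuming a struct zone *zone
 * is in scope): walk the free pfn ranges that fall inside one zone, as the
 * deferred struct page init path does.
 *
 *	unsigned long spfn, epfn;
 *	u64 i;
 *
 *	for_each_free_mem_pfn_range_in_zone(i, zone, &spfn, &epfn)
 *		pr_info("zone free pfns: %lx-%lx\n", spfn, epfn);
 */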

/**
 * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from current position. Available as soon as memblock is
 * initialized.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
	for (; i != U64_MAX;					  \
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock.  Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)
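
/*
 * Illustrative sketch (example-only variables): count how much memory is
 * still free (not reserved) before the page allocator is up.
 *
 *	phys_addr_t start, end, free = 0;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		free += end - start;
 */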

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order.  Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
			       nid, flags, p_start, p_end, p_nid)

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
#define MEMBLOCK_ALLOC_KASAN		1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
				      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
					      phys_addr_t align)
{
	return memblock_phys_alloc_range(size, align, 0,
					 MEMBLOCK_ALLOC_ACCESSIBLE);
}
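
/*
 * Illustrative sketch (example-only): grab a page-aligned physical buffer
 * during early boot; the range stays reserved unless it is later released
 * with memblock_free(). A return value of 0 means the allocation failed.
 *
 *	phys_addr_t pa = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 *
 *	if (!pa)
 *		panic("cannot reserve early buffer");
 */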

void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
			     phys_addr_t min_addr, phys_addr_t max_addr,
			     int nid);

static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
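
/*
 * Illustrative sketch (example-only; "size" is a placeholder): the common
 * early-boot pattern for getting a zeroed, virtually addressable buffer
 * before the slab allocator is available.
 *
 *	void *table = memblock_alloc(size, SMP_CACHE_BYTES);
 *
 *	if (!table)
 *		panic("%s: failed to allocate %zu bytes\n", __func__, size);
 */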

static inline void * __init memblock_alloc_raw(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
					  MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_from(phys_addr_t size,
						phys_addr_t align,
						phys_addr_t min_addr)
{
	return memblock_alloc_try_nid(size, align, min_addr,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_low(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_node(phys_addr_t size,
						phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

static inline void __init memblock_free_early(phys_addr_t base,
					      phys_addr_t size)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_early_nid(phys_addr_t base,
						  phys_addr_t size, int nid)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	__memblock_free_late(base, size);
}

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up or not.
 * If this returns true, memblock will allocate memory
 * in the bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}

phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);


phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}

#define for_each_memblock(memblock_type, region)					\
	for (region = memblock.memblock_type.regions;					\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
	     region++)
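
/*
 * Illustrative sketch (example-only variables): combine for_each_memblock()
 * with the pfn accessors above to walk memory as page frame ranges.
 *
 *	struct memblock_region *reg;
 *
 *	for_each_memblock(memory, reg)
 *		pr_info("pfns %lu-%lu\n",
 *			memblock_region_memory_base_pfn(reg),
 *			memblock_region_memory_end_pfn(reg));
 */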

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit);

#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
					 * shift passed via *_hash_shift */
#define HASH_ZERO	0x00000004	/* Zero allocated hash table */

/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */