/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
 */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};

/**
 * struct memblock_region - represents a memory region
 * @base: physical address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 * @physmem: all physical memory
 */
struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
};

extern struct memblock memblock;
extern int memblock_debug;

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif

#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);

unsigned long memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);
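
/*
 * Usage sketch (illustrative only, not part of the upstream header): a
 * typical early-boot sequence registers the RAM reported by firmware with
 * memblock_add() and then reserves ranges that must never be handed out,
 * such as the kernel image. The helper below and all of its parameters are
 * hypothetical placeholders.
 */
#if 0	/* example, not compiled */
static void __init example_register_memory(phys_addr_t ram_base,
					   phys_addr_t ram_size,
					   phys_addr_t kernel_start,
					   phys_addr_t kernel_end)
{
	/* Make the RAM reported by firmware available to the allocator. */
	memblock_add(ram_base, ram_size);

	/* Keep the kernel image out of the allocatable pool. */
	memblock_reserve(kernel_start, kernel_end - kernel_start);

	/* A range that must stay out of the linear map can be marked nomap. */
	memblock_mark_nomap(ram_base, SZ_1M);
}
#endif	/* example */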

/* Low level functions */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);

void __memblock_free_late(phys_addr_t base, phys_addr_t size);

/**
 * for_each_mem_range - iterate through memblock areas from type_a that are
 * not included in type_b (or through all of type_a if type_b is %NULL).
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))

/**
 * for_each_mem_range_rev - reverse-iterate through memblock areas from
 * type_a that are not included in type_b (or through all of type_a if
 * type_b is %NULL).
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		     __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
					  p_start, p_end, p_nid);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))
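
/*
 * Usage sketch (illustrative only, not part of the upstream header):
 * dumping every reserved range with for_each_reserved_mem_region(). The
 * helper name is hypothetical; the iterator itself works as soon as
 * memblock is initialized.
 */
#if 0	/* example, not compiled */
static void __init example_dump_reserved(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_reserved_mem_region(i, &start, &end)
		pr_info("reserved: [%pa-%pa]\n", &start, &end);
}
#endif	/* example */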

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long  *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
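
/*
 * Usage sketch (illustrative only, CONFIG_HAVE_MEMBLOCK_NODE_MAP): counting
 * the pages that the early memory map contributes to one node with
 * for_each_mem_pfn_range(). The helper name is hypothetical.
 */
#if 0	/* example, not compiled */
static unsigned long __init example_node_present_pages(int nid)
{
	unsigned long start_pfn, end_pfn, pages = 0;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
		pages += end_pfn - start_pfn;

	return pages;
}
#endif	/* example */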

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
				  unsigned long *out_spfn,
				  unsigned long *out_epfn);
/**
 * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone are initialized. The main
 * assumption is that the zone start, end, and pgdat have been associated.
 * This way we can use the zone to determine NUMA node, and if a given part
 * of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)	\
	for (i = 0,							\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);	\
	     i != U64_MAX;					\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

/**
 * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from current position. Available as soon as memblock is
 * initialized.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
	for (; i != U64_MAX;					  \
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
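
/*
 * Usage sketch (illustrative only): the zone-bound iterators above exist for
 * deferred struct page initialization, which walks the free ranges of an
 * already-sized zone in pfn chunks. The helper below is hypothetical.
 */
#if 0	/* example, not compiled */
static void example_walk_zone_free_ranges(struct zone *zone)
{
	unsigned long spfn, epfn;
	u64 i;

	for_each_free_mem_pfn_range_in_zone(i, zone, &spfn, &epfn) {
		/* initialize struct pages for pfns [spfn, epfn) here */
	}
}
#endif	/* example */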
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock.  Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)
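
/*
 * Usage sketch (illustrative only): totalling the free (memory && !reserved)
 * space known to memblock across all nodes with for_each_free_mem_range().
 * The helper name is hypothetical.
 */
#if 0	/* example, not compiled */
static phys_addr_t __init example_free_bytes(void)
{
	phys_addr_t start, end, total = 0;
	u64 i;

	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&start, &end, NULL)
		total += end - start;

	return total;
}
#endif	/* example */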

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order.  Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
			       nid, flags, p_start, p_end, p_nid)

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
#define MEMBLOCK_ALLOC_KASAN		1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
				      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
					      phys_addr_t align)
{
	return memblock_phys_alloc_range(size, align, 0,
					 MEMBLOCK_ALLOC_ACCESSIBLE);
}

void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
			     phys_addr_t min_addr, phys_addr_t max_addr,
			     int nid);

static inline void * __init memblock_alloc(phys_addr_t size,  phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_raw(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
					  MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_from(phys_addr_t size,
						phys_addr_t align,
						phys_addr_t min_addr)
{
	return memblock_alloc_try_nid(size, align, min_addr,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_low(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_node(phys_addr_t size,
						phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
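
/*
 * Usage sketch (illustrative only): the wrappers above all funnel into
 * memblock_alloc_try_nid(). A boot-time table is typically allocated zeroed
 * with memblock_alloc(), or near a particular node with
 * memblock_alloc_node(). The helper and its parameters are hypothetical.
 */
#if 0	/* example, not compiled */
static void __init example_alloc_table(phys_addr_t size, int nid)
{
	void *table;

	/* Zeroed allocation anywhere below the current limit. */
	table = memblock_alloc(size, SMP_CACHE_BYTES);
	if (!table)
		panic("%s: failed to allocate %pa bytes\n", __func__, &size);

	/* Alternatively, place the data close to a specific NUMA node. */
	table = memblock_alloc_node(size, SMP_CACHE_BYTES, nid);
}
#endif	/* example */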

static inline void __init memblock_free_early(phys_addr_t base,
					      phys_addr_t size)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_early_nid(phys_addr_t base,
						  phys_addr_t size, int nid)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	__memblock_free_late(base, size);
}

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up. If this returns true,
 * memblock will allocate memory in the bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}
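
/*
 * Usage sketch (illustrative only): an architecture that wants early
 * allocations taken from low addresses, e.g. so that higher and possibly
 * hot-removable memory stays untouched, can flip the allocation direction
 * before the bulk of boot-time allocations happen.
 */
#if 0	/* example, not compiled */
static void __init example_prefer_low_allocations(void)
{
	memblock_set_bottom_up(true);

	if (memblock_bottom_up())
		pr_info("memblock: allocating bottom-up\n");
}
#endif	/* example */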

phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);


phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}

#define for_each_memblock(memblock_type, region)					\
	for (region = memblock.memblock_type.regions;					\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
	     region++)
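
/*
 * Usage sketch (illustrative only): combining for_each_memblock() with the
 * pfn accessors above to walk every registered memory region as a pfn
 * range. The helper name is hypothetical.
 */
#if 0	/* example, not compiled */
static void __init example_walk_memory_pfns(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		unsigned long start_pfn = memblock_region_memory_base_pfn(reg);
		unsigned long end_pfn = memblock_region_memory_end_pfn(reg);

		pr_info("memory: pfn range [%lx-%lx)\n", start_pfn, end_pfn);
	}
}
#endif	/* example */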

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit);

#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
					 * shift passed via *_hash_shift */
#define HASH_ZERO	0x00000004	/* Zero allocated hash table */

/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif
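
/*
 * Usage sketch (illustrative only): alloc_large_system_hash() is how large
 * boot-time hash tables (dentry, inode, ...) are sized and allocated. The
 * table, shift variable and scale value below are hypothetical.
 */
#if 0	/* example, not compiled */
static void __init example_hash_init(void)
{
	static unsigned int example_shift;
	void *example_hash;

	example_hash = alloc_large_system_hash("example",
					       sizeof(struct hlist_head),
					       0,	/* size from memory */
					       14,	/* scale */
					       HASH_ZERO,
					       &example_shift,
					       NULL,	/* no mask needed */
					       0,
					       0);
}
#endif	/* example */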

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */