#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
 */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};

/**
 * struct memblock_region - represents a memory region
 * @base: physical address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 * @physmem: all physical memory
 */
struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
};

extern struct memblock memblock;
extern int memblock_debug;

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
#endif

#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);

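/*
 * Example (illustrative sketch, not part of this header): early arch setup
 * code typically registers the RAM reported by firmware with memblock_add()
 * and then reserves ranges that must never be handed out. The addresses and
 * sizes below are made up for illustration.
 *
 *	static void __init example_register_memory(void)
 *	{
 *		memblock_add(0, SZ_512M);		// first 512M is RAM
 *		memblock_reserve(SZ_64M, SZ_16M);	// firmware-owned range
 *	}
 */
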
unsigned long memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);

/* Low level functions */
int memblock_add_range(struct memblock_type *type,
		       phys_addr_t base, phys_addr_t size,
		       int nid, enum memblock_flags flags);

void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);

void __memblock_free_late(phys_addr_t base, phys_addr_t size);

/**
 * for_each_mem_range - iterate through memblock areas from type_a that are
 * not included in type_b; iterates over just type_a if type_b is %NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))

/**
 * for_each_mem_range_rev - reverse iterate through memblock areas from type_a
 * that are not included in type_b; iterates over just type_a if type_b is
 * %NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		     __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
					  p_start, p_end, p_nid);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))

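/*
 * Example (illustrative sketch): accounting how much memory is currently
 * reserved, e.g. for an early boot report.
 *
 *	phys_addr_t start, end, reserved = 0;
 *	u64 i;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		reserved += end - start;
 */
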
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long  *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

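/*
 * Example (illustrative sketch, requires CONFIG_HAVE_MEMBLOCK_NODE_MAP):
 * summing the pages present in every early memory range; pass a node id
 * instead of %MAX_NUMNODES to restrict the walk to one node.
 *
 *	unsigned long start_pfn, end_pfn, pages = 0;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pages += end_pfn - start_pfn;
 */
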
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
				  unsigned long *out_spfn,
				  unsigned long *out_epfn);
/**
 * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone are initialized. The main
 * assumption is that the zone start, end, and pgdat have been associated.
 * This way we can use the zone to determine the NUMA node, and whether a
 * given part of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)	\
	for (i = 0,							\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);	\
	     i != U64_MAX;					\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock.  Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)

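/*
 * Example (illustrative sketch): summing free memory across all nodes.
 * MEMBLOCK_NONE means the walk is not restricted by region flags.
 *
 *	phys_addr_t start, end, free_mem = 0;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		free_mem += end - start;
 */
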
/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order.  Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
			       nid, flags, p_start, p_end, p_nid)

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
#define MEMBLOCK_ALLOC_KASAN		1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
				      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
					      phys_addr_t align)
{
	return memblock_phys_alloc_range(size, align, 0,
					 MEMBLOCK_ALLOC_ACCESSIBLE);
}

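/*
 * Example (illustrative sketch): allocating a page-aligned physical range
 * when only the physical address is needed (these helpers do not set up a
 * kernel virtual mapping for the caller). The size is made up.
 *
 *	phys_addr_t pa = memblock_phys_alloc(SZ_1M, PAGE_SIZE);
 *
 *	if (!pa)
 *		panic("%s: failed to allocate 1M\n", __func__);
 */
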
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
			     phys_addr_t min_addr, phys_addr_t max_addr,
			     int nid);

static inline void * __init memblock_alloc(phys_addr_t size,  phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_raw(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
					  MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_from(phys_addr_t size,
						phys_addr_t align,
						phys_addr_t min_addr)
{
	return memblock_alloc_try_nid(size, align, min_addr,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_low(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_node(phys_addr_t size,
						phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

static inline void __init memblock_free_early(phys_addr_t base,
					      phys_addr_t size)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_early_nid(phys_addr_t base,
						  phys_addr_t size, int nid)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	__memblock_free_late(base, size);
}

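/*
 * Example (illustrative sketch): a typical boot-time table allocation.
 * memblock_alloc() returns zeroed, direct-mapped memory or NULL; early
 * callers usually panic because boot cannot continue without it. The type
 * and entry count below are hypothetical.
 *
 *	struct example_entry *table;
 *
 *	table = memblock_alloc(nr_entries * sizeof(*table), SMP_CACHE_BYTES);
 *	if (!table)
 *		panic("%s: failed to allocate example table\n", __func__);
 *	...
 *	memblock_free_early(__pa(table), nr_entries * sizeof(*table));
 */
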
/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up or not.
 * if this is true, that said, memblock will allocate memory
 * in bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}

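/*
 * Example (illustrative sketch): some architectures flip the allocation
 * direction for part of early boot so that allocations land close to the
 * kernel image, then restore the default top-down behaviour.
 *
 *	memblock_set_bottom_up(true);
 *	... early allocations ...
 *	memblock_set_bottom_up(false);
 */
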
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);


phys_addr_t memblock_get_current_limit(void);

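/*
 * Example (illustrative sketch): arch code commonly caps allocations to the
 * memory that is already mapped while page tables are being built, then
 * lifts the cap once the full direct map exists. "mapped_end" is a
 * hypothetical variable.
 *
 *	memblock_set_current_limit(mapped_end);
 *	... build the direct map ...
 *	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 */
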
/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}

#define for_each_memblock(memblock_type, region)					\
	for (region = memblock.memblock_type.regions;					\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
	     region++)

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

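/*
 * Example (illustrative sketch): dumping every registered memory region
 * together with the page frame range it covers.
 *
 *	struct memblock_region *reg;
 *
 *	for_each_memblock(memory, reg)
 *		pr_info("region: base %pa size %pa pfns %lu..%lu\n",
 *			&reg->base, &reg->size,
 *			memblock_region_memory_base_pfn(reg),
 *			memblock_region_memory_end_pfn(reg));
 */
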
extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit);

#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
					 * shift passed via *_hash_shift */
#define HASH_ZERO	0x00000004	/* Zero allocated hash table */

/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */