#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>

#include <asm/memblock.h>

/* Size of the statically allocated initial region arrays. */
#define INIT_MEMBLOCK_REGIONS	128
/* All-ones physical address used as the allocators' failure sentinel. */
#define MEMBLOCK_ERROR		(~(phys_addr_t)0)

struct memblock_region {
25 26
	phys_addr_t base;
	phys_addr_t size;
Y
Yinghai Lu 已提交
27 28
};

/* A growable array of regions of one kind (memory or reserved). */
struct memblock_type {
	unsigned long cnt;	/* number of regions */
	unsigned long max;	/* size of the allocated array */
	struct memblock_region *regions;	/* the region array itself */
};

struct memblock {
36
	phys_addr_t current_limit;
37
	phys_addr_t memory_size;	/* Updated by memblock_analyze() */
38 39
	struct memblock_type memory;
	struct memblock_type reserved;
Y
Yinghai Lu 已提交
40 41 42
};

/* The single global memblock instance and its debug/resize controls. */
extern struct memblock memblock;
extern int memblock_debug;
extern int memblock_can_resize;

/*
 * Print an informational message only when memblock debugging is enabled.
 * Wrapped in do { } while (0) so the macro expands to a single statement
 * and is safe inside an unbraced if/else (the original bare `if` could
 * capture a following `else`).
 */
#define memblock_dbg(fmt, ...) \
	do { \
		if (memblock_debug) \
			printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__); \
	} while (0)

/* One-time setup of the region arrays, called early during boot. */
extern void __init memblock_init(void);
/* Recompute cached totals (memblock.memory_size) after regions change. */
extern void __init memblock_analyze(void);
/* Register/unregister ranges with the memory and reserved types. */
extern long memblock_add(phys_addr_t base, phys_addr_t size);
extern long memblock_remove(phys_addr_t base, phys_addr_t size);
extern long __init memblock_free(phys_addr_t base, phys_addr_t size);
extern long __init memblock_reserve(phys_addr_t base, phys_addr_t size);

/* The numa aware allocator is only available if
 * CONFIG_ARCH_POPULATES_NODE_MAP is set
 */
59 60 61 62 63
extern phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align,
					int nid);
extern phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
					    int nid);

extern phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align);

/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0

extern phys_addr_t __init memblock_alloc_base(phys_addr_t size,
71 72
					 phys_addr_t align,
					 phys_addr_t max_addr);
73
extern phys_addr_t __init __memblock_alloc_base(phys_addr_t size,
74 75
					   phys_addr_t align,
					   phys_addr_t max_addr);
/* Query helpers over the memory and reserved region lists. */
extern phys_addr_t __init memblock_phys_mem_size(void);
extern phys_addr_t memblock_end_of_DRAM(void);
extern void __init memblock_enforce_memory_limit(phys_addr_t memory_limit);
extern int memblock_is_memory(phys_addr_t addr);
extern int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
extern int __init memblock_is_reserved(phys_addr_t addr);
extern int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void memblock_dump_all(void);

/* Provided by the architecture */
87
extern phys_addr_t memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid);
88 89
extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
				   phys_addr_t addr2, phys_addr_t size2);

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
97
extern void memblock_set_current_limit(phys_addr_t limit);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_base_pfn - Return the lowest pfn intersecting with the region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_base_pfn(const struct memblock_region *reg)
{
	return reg->base >> PAGE_SHIFT;
}

/**
 * memblock_region_last_pfn - Return the highest pfn intersecting with the region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_last_pfn(const struct memblock_region *reg)
{
	return (reg->base + reg->size - 1) >> PAGE_SHIFT;
}

/**
 * memblock_region_end_pfn - Return the pfn of the first page following the region
 *                      but not intersecting it
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_end_pfn(const struct memblock_region *reg)
{
	return memblock_region_last_pfn(reg) + 1;
}

/**
 * memblock_region_pages - Return the number of pages covering a region
 * @reg: memblock_region structure
 *
 * Fix: the previous body computed end_pfn - end_pfn, which is always 0.
 * The page count is the distance from the base pfn to the end pfn.
 */
static inline unsigned long memblock_region_pages(const struct memblock_region *reg)
{
	return memblock_region_end_pfn(reg) - memblock_region_base_pfn(reg);
}

/*
 * for_each_memblock - iterate over every region of one memblock type
 * @memblock_type: token naming the member to walk (memory or reserved)
 * @region: struct memblock_region * used as the loop cursor
 */
#define for_each_memblock(memblock_type, region)					\
	for (region = memblock.memblock_type.regions;				\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
	     region++)


#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */