Commit 2f1b6248 authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] reduce MAX_NR_ZONES: use enum to define zones, reformat and comment

Use an enum for zones and reformat zone-dependent information.

Add comments explaining the use of zones and add an enum zone_type for zone
numbers.

Line up information that will be #ifdef'ed by the following patches.

[akpm@osdl.org: comment cleanups]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 98d2b0eb
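The gist of the change, as a standalone C sketch (illustrative only, not kernel code; the names mirror the real ones): letting the last enum member double as the zone count removes the old "Sync this with ZONES_SHIFT" style of bookkeeping, and a typed parameter or return value records that a number is a zone index rather than an arbitrary unsigned long.

    #include <stdio.h>

    /* Miniature form of the patch's pattern: the final member doubles as
     * the number of zones, so adding a zone updates the count for free. */
    enum zone_type {
            ZONE_DMA,
            ZONE_DMA32,
            ZONE_NORMAL,
            ZONE_HIGHMEM,
            MAX_NR_ZONES    /* always last: equals the zone count */
    };

    /* A typed parameter says "this is a zone index", which a bare
     * "unsigned long" (the pre-patch signature style) does not. */
    static const char *zone_name(enum zone_type zone)
    {
            static const char *names[MAX_NR_ZONES] = {
                    "DMA", "DMA32", "Normal", "HighMem"
            };
            return names[zone];
    }

    int main(void)
    {
            enum zone_type z;

            for (z = ZONE_DMA; z < MAX_NR_ZONES; z++)
                    printf("zone %d: %s\n", (int)z, zone_name(z));
            return 0;
    }

The diff below applies exactly this pattern to include/linux/mm.h, include/linux/mmzone.h and mm/page_alloc.c.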
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -470,7 +470,7 @@ void split_page(struct page *page, unsigned int order);
 #define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
 #define ZONETABLE_MASK		((1UL << ZONETABLE_SHIFT) - 1)
 
-static inline unsigned long page_zonenum(struct page *page)
+static inline enum zone_type page_zonenum(struct page *page)
 {
 	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 }
@@ -499,11 +499,12 @@ static inline unsigned long page_to_section(struct page *page)
 	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
 }
 
-static inline void set_page_zone(struct page *page, unsigned long zone)
+static inline void set_page_zone(struct page *page, enum zone_type zone)
 {
 	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
 	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
 }
+
 static inline void set_page_node(struct page *page, unsigned long node)
 {
 	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
@@ -515,7 +516,7 @@ static inline void set_page_section(struct page *page, unsigned long section)
 	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
 }
 
-static inline void set_page_links(struct page *page, unsigned long zone,
+static inline void set_page_links(struct page *page, enum zone_type zone,
 	unsigned long node, unsigned long pfn)
 {
 	set_page_zone(page, zone);
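The include/linux/mm.h hunks above only retype an existing bit-packing scheme: the zone number lives in a few bits of page->flags. A self-contained sketch of that pack/unpack dance (ZONES_PGSHIFT here is an invented value for illustration; the real one depends on the configuration):

    #include <assert.h>
    #include <stdio.h>

    #define ZONES_SHIFT     2
    #define ZONES_PGSHIFT   30      /* hypothetical bit position */
    #define ZONES_MASK      ((1UL << ZONES_SHIFT) - 1)

    enum zone_type { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_HIGHMEM, MAX_NR_ZONES };

    /* Mirrors set_page_zone(): clear the old zone bits, then or in the new. */
    static void set_zone(unsigned long *flags, enum zone_type zone)
    {
            *flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
            *flags |= ((unsigned long)zone & ZONES_MASK) << ZONES_PGSHIFT;
    }

    /* Mirrors page_zonenum(): shift the zone bits down and mask them off. */
    static enum zone_type get_zone(unsigned long flags)
    {
            return (flags >> ZONES_PGSHIFT) & ZONES_MASK;
    }

    int main(void)
    {
            unsigned long flags = 0;

            set_zone(&flags, ZONE_HIGHMEM);
            assert(get_zone(flags) == ZONE_HIGHMEM);
            printf("zone index %d round-tripped through flags\n", (int)get_zone(flags));
            return 0;
    }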
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -88,14 +88,53 @@ struct per_cpu_pageset {
 #define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
 #endif
 
-#define ZONE_DMA		0
-#define ZONE_DMA32		1
-#define ZONE_NORMAL		2
-#define ZONE_HIGHMEM		3
+enum zone_type {
+	/*
+	 * ZONE_DMA is used when there are devices that are not able
+	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
+	 * carve out the portion of memory that is needed for these devices.
+	 * The range is arch specific.
+	 *
+	 * Some examples
+	 *
+	 * Architecture		Limit
+	 * ---------------------------
+	 * parisc, ia64, sparc	<4G
+	 * s390			<2G
+	 * arm26		<48M
+	 * arm			Various
+	 * alpha		Unlimited or 0-16MB.
+	 *
+	 * i386, x86_64 and multiple other arches
+	 *			<16M.
+	 */
+	ZONE_DMA,
+	/*
+	 * x86_64 needs two ZONE_DMAs because it supports devices that are
+	 * only able to do DMA to the lower 16M but also 32 bit devices that
+	 * can only do DMA areas below 4G.
+	 */
+	ZONE_DMA32,
+	/*
+	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
+	 * performed on pages in ZONE_NORMAL if the DMA devices support
+	 * transfers to all addressable memory.
+	 */
+	ZONE_NORMAL,
+	/*
+	 * A memory area that is only addressable by the kernel through
+	 * mapping portions into its own address space. This is for example
+	 * used by i386 to allow the kernel to address the memory beyond
+	 * 900MB. The kernel will set up special mappings (page
+	 * table entries on i386) for each page that the kernel needs to
+	 * access.
+	 */
+	ZONE_HIGHMEM,
 
-#define MAX_NR_ZONES		4	/* Sync this with ZONES_SHIFT */
-#define ZONES_SHIFT		2	/* ceil(log2(MAX_NR_ZONES)) */
+	MAX_NR_ZONES
+};
+
+#define ZONES_SHIFT		2	/* ceil(log2(MAX_NR_ZONES)) */
 
 /*
  * When a memory allocation must conform to specific limitations (such
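The deleted "Sync this with ZONES_SHIFT" comment points at the one invariant that still has to be kept by hand: MAX_NR_ZONES must fit in ZONES_SHIFT bits. With the enum in place this could be enforced at compile time in the style of the kernel's BUILD_BUG_ON(); a sketch, assuming the definitions from the hunk above (the patch itself adds no such check, and zone_shift_sanity_check() is a hypothetical helper):

    /* Compilation fails (negative array size) if MAX_NR_ZONES ever
     * outgrows ZONES_SHIFT bits. Same idea as the kernel's BUILD_BUG_ON(). */
    #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)]))

    static inline void zone_shift_sanity_check(void)
    {
            BUILD_BUG_ON(MAX_NR_ZONES > (1 << ZONES_SHIFT));
    }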
@@ -126,16 +165,6 @@ struct per_cpu_pageset {
 /* #define GFP_ZONETYPES	(GFP_ZONEMASK + 1) */		/* Non-loner */
 #define GFP_ZONETYPES  ((GFP_ZONEMASK + 1) / 2 + 1)		/* Loner */
 
-/*
- * On machines where it is needed (eg PCs) we divide physical memory
- * into multiple physical zones. On a 32bit PC we have 4 zones:
- *
- * ZONE_DMA	  < 16 MB	ISA DMA capable memory
- * ZONE_DMA32	     0 MB	Empty
- * ZONE_NORMAL	16-896 MB	direct mapped by the kernel
- * ZONE_HIGHMEM	 > 896 MB	only page cache and user processes
- */
-
 struct zone {
 	/* Fields commonly accessed by the page allocator */
 	unsigned long		free_pages;
@@ -266,7 +295,6 @@ struct zone {
 	char			*name;
 } ____cacheline_internodealigned_in_smp;
 
-
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
@@ -373,12 +401,12 @@ static inline int populated_zone(struct zone *zone)
 	return (!!zone->present_pages);
 }
 
-static inline int is_highmem_idx(int idx)
+static inline int is_highmem_idx(enum zone_type idx)
 {
 	return (idx == ZONE_HIGHMEM);
 }
 
-static inline int is_normal_idx(int idx)
+static inline int is_normal_idx(enum zone_type idx)
 {
 	return (idx == ZONE_NORMAL);
 }
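A practical payoff of enum zone_type over the old #define constants: gcc's -Wswitch (enabled by -Wall) can now flag a switch that forgets a zone. A small sketch, assuming the enum from the first mmzone.h hunk; zone_kind() is a hypothetical helper, not kernel code:

    static const char *zone_kind(enum zone_type idx)
    {
            switch (idx) {
            case ZONE_DMA:
            case ZONE_DMA32:
            case ZONE_NORMAL:
                    return "lowmem";
            case ZONE_HIGHMEM:
                    return "highmem";
            case MAX_NR_ZONES:
                    break;          /* sentinel, not a real zone */
            }
            /* Add a zone to the enum without updating this switch and gcc
             * warns: "enumeration value ... not handled in switch". */
            return "?";
    }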
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -68,7 +68,11 @@ static void __free_pages_ok(struct page *page, unsigned int order);
  * TBD: should special case ZONE_DMA32 machines here - in those we normally
  * don't need any ZONE_NORMAL reservation
  */
-int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 256, 32 };
+int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
+	 256,
+	 256,
+	 32
+};
 
 EXPORT_SYMBOL(totalram_pages);
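For orientation only, since the hunk merely reflows the initializer: as implemented by setup_per_zone_lowmem_reserve() in this file, each ratio divides the pages of the higher zones to decide how much of a lower zone to protect from allocations that could have been satisfied higher up. Roughly, with the ratio of 32 for ZONE_NORMAL and 1 GB of highmem (262144 pages of 4 KB), about 262144 / 32 = 8192 pages (32 MB) of ZONE_NORMAL stay off-limits to highmem-capable allocations.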
@@ -79,7 +83,13 @@ EXPORT_SYMBOL(totalram_pages);
 struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
 EXPORT_SYMBOL(zone_table);
 
-static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem" };
+static char *zone_names[MAX_NR_ZONES] = {
+	 "DMA",
+	 "DMA32",
+	 "Normal",
+	 "HighMem"
+};
+
 int min_free_kbytes = 1024;
 
 unsigned long __meminitdata nr_kernel_pages;
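Because zone_names[] is sized by MAX_NR_ZONES, growing the enum without adding a name would leave a silent NULL entry. A hypothetical variant (not what the patch does) that pins each string to its enum member with C99 designated initializers, so reordering the enum cannot mislabel a zone:

    static char *zone_names[MAX_NR_ZONES] = {
            [ZONE_DMA]      = "DMA",
            [ZONE_DMA32]    = "DMA32",
            [ZONE_NORMAL]   = "Normal",
            [ZONE_HIGHMEM]  = "HighMem",
    };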
@@ -1487,7 +1497,9 @@ static void __meminit build_zonelists(pg_data_t *pgdat)
 static void __meminit build_zonelists(pg_data_t *pgdat)
 {
-	int i, j, k, node, local_node;
+	int i, node, local_node;
+	enum zone_type k;
+	enum zone_type j;
 
 	local_node = pgdat->node_id;
 	for (i = 0; i < GFP_ZONETYPES; i++) {
@@ -1675,8 +1687,8 @@ void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
 }
 
 #define ZONETABLE_INDEX(x, zone_nr) ((x << ZONES_SHIFT) | zone_nr)
-void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
-		 unsigned long size)
+void zonetable_add(struct zone *zone, int nid, enum zone_type zid,
+			unsigned long pfn, unsigned long size)
 {
 	unsigned long snum = pfn_to_section_nr(pfn);
 	unsigned long end = pfn_to_section_nr(pfn + size);
@@ -1960,7 +1972,7 @@ __meminit int init_currently_empty_zone(struct zone *zone,
 static void __meminit free_area_init_core(struct pglist_data *pgdat,
 		unsigned long *zones_size, unsigned long *zholes_size)
 {
-	unsigned long j;
+	enum zone_type j;
 	int nid = pgdat->node_id;
 	unsigned long zone_start_pfn = pgdat->node_start_pfn;
 	int ret;