#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
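
/*
 * The double-underscore variants below use the non-preempt-safe
 * __this_cpu operations: the caller must have preemption disabled or
 * otherwise be pinned to this CPU. The plain variants are safe to
 * call from any context.
 */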

static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
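
/*
 * Illustrative usage (not part of this header): a fault path might
 * account a single event with
 *
 *	count_vm_event(PGFAULT);
 *
 * or batch several events at once with
 *
 *	count_vm_events(PGFAULT, nr_faults);
 *
 * where PGFAULT is an item from enum vm_event_item and nr_faults is a
 * hypothetical local count.
 */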

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
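/*
 * Note that the stub for count_vm_numa_events() still evaluates its
 * delta argument so that otherwise-unused variables in callers do not
 * trigger compiler warnings.
 */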
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */
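
/*
 * The zoned vm event items (e.g. PGALLOC_*) are laid out contiguously
 * in zone order, so the item for a given zone can be derived from the
 * _NORMAL item plus the zone index offset.
 */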

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}
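
/*
 * Under SMP, per-cpu deltas are folded into these atomics only
 * periodically, so readers can observe transiently negative values;
 * the accessors below clamp to zero instead of reporting an underflow.
 */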

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization, so the result is still only
 * approximate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
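
/*
 * The snapshot walks every online CPU and is therefore much more
 * expensive than zone_page_state(); use it only where a stale value
 * could lead to a wrong decision.
 */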

extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently on a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}
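
/*
 * zone_statistics() accounts a successful allocation in the NUMA
 * hit/miss/foreign/local event counters by comparing the preferred
 * zone with the zone the page was actually taken from.
 */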

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
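/*
 * The double-underscore versions may only be used when the caller
 * knows interrupts are disabled (or is otherwise serialized on this
 * CPU); the plain versions disable interrupts themselves.
 */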
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_zone_stat_thresholds(void);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
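
/*
 * Per-cpu counter deltas are folded into the zone counters once they
 * exceed a threshold. calculate_normal_threshold() computes the
 * default; calculate_pressure_threshold() computes a tighter bound,
 * and set_pgdat_percpu_threshold() lets kswapd switch a node's zones
 * between the two so that watermark checks stay accurate under memory
 * pressure.
 */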

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single-processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no
 * need to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define set_pgdat_percpu_threshold(pgdat, callback) do { } while (0)

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_zone_stat_thresholds(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif		/* CONFIG_SMP */
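
/*
 * Free-page accounting helper: pages in CMA pageblocks are reflected
 * both in NR_FREE_PAGES and in the dedicated NR_FREE_CMA_PAGES
 * counter.
 */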

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */