提交 38f71479 编写于 作者: David Howells

NOMMU: Improve procfs output using per-MM VMAs

Improve procfs output using per-MM VMAs for process memory accounting.
Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Mike Frysinger <vapier.adi@gmail.com>
Acked-by: Paul Mundt <lethal@linux-sh.org>
上级 dd8632a1
...@@ -16,24 +16,31 @@ ...@@ -16,24 +16,31 @@
void task_mem(struct seq_file *m, struct mm_struct *mm) void task_mem(struct seq_file *m, struct mm_struct *mm)
{ {
struct vm_area_struct *vma; struct vm_area_struct *vma;
struct vm_region *region;
struct rb_node *p; struct rb_node *p;
unsigned long bytes = 0, sbytes = 0, slack = 0; unsigned long bytes = 0, sbytes = 0, slack = 0, size;
down_read(&mm->mmap_sem); down_read(&mm->mmap_sem);
for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
vma = rb_entry(p, struct vm_area_struct, vm_rb); vma = rb_entry(p, struct vm_area_struct, vm_rb);
bytes += kobjsize(vma); bytes += kobjsize(vma);
region = vma->vm_region;
if (region) {
size = kobjsize(region);
size += region->vm_end - region->vm_start;
} else {
size = vma->vm_end - vma->vm_start;
}
if (atomic_read(&mm->mm_count) > 1 || if (atomic_read(&mm->mm_count) > 1 ||
vma->vm_region ||
vma->vm_flags & VM_MAYSHARE) { vma->vm_flags & VM_MAYSHARE) {
sbytes += kobjsize((void *) vma->vm_start); sbytes += size;
if (vma->vm_region)
sbytes += kobjsize(vma->vm_region);
} else { } else {
bytes += kobjsize((void *) vma->vm_start); bytes += size;
slack += kobjsize((void *) vma->vm_start) - if (region)
(vma->vm_end - vma->vm_start); slack = region->vm_end - vma->vm_end;
} }
} }
...@@ -77,7 +84,7 @@ unsigned long task_vsize(struct mm_struct *mm) ...@@ -77,7 +84,7 @@ unsigned long task_vsize(struct mm_struct *mm)
down_read(&mm->mmap_sem); down_read(&mm->mmap_sem);
for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
vma = rb_entry(p, struct vm_area_struct, vm_rb); vma = rb_entry(p, struct vm_area_struct, vm_rb);
vsize += vma->vm_region->vm_end - vma->vm_region->vm_start; vsize += vma->vm_end - vma->vm_start;
} }
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);
return vsize; return vsize;
...@@ -87,6 +94,7 @@ int task_statm(struct mm_struct *mm, int *shared, int *text, ...@@ -87,6 +94,7 @@ int task_statm(struct mm_struct *mm, int *shared, int *text,
int *data, int *resident) int *data, int *resident)
{ {
struct vm_area_struct *vma; struct vm_area_struct *vma;
struct vm_region *region;
struct rb_node *p; struct rb_node *p;
int size = kobjsize(mm); int size = kobjsize(mm);
...@@ -94,7 +102,11 @@ int task_statm(struct mm_struct *mm, int *shared, int *text, ...@@ -94,7 +102,11 @@ int task_statm(struct mm_struct *mm, int *shared, int *text,
for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
vma = rb_entry(p, struct vm_area_struct, vm_rb); vma = rb_entry(p, struct vm_area_struct, vm_rb);
size += kobjsize(vma); size += kobjsize(vma);
size += kobjsize((void *) vma->vm_start); region = vma->vm_region;
if (region) {
size += kobjsize(region);
size += region->vm_end - region->vm_start;
}
} }
size += (*text = mm->end_code - mm->start_code); size += (*text = mm->end_code - mm->start_code);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册