提交 d99e9446 编写于 作者: P Peter Zijlstra 提交者: Ingo Molnar

perf_counter: Remove munmap stuff

In name of keeping it simple, only track mmap events. Userspace
will have to remove old overlapping maps when it encounters them.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
上级 60313ebe
...@@ -148,11 +148,10 @@ struct perf_counter_attr { ...@@ -148,11 +148,10 @@ struct perf_counter_attr {
exclude_hv : 1, /* ditto hypervisor */ exclude_hv : 1, /* ditto hypervisor */
exclude_idle : 1, /* don't count when idle */ exclude_idle : 1, /* don't count when idle */
mmap : 1, /* include mmap data */ mmap : 1, /* include mmap data */
munmap : 1, /* include munmap data */
comm : 1, /* include comm data */ comm : 1, /* include comm data */
freq : 1, /* use freq, not period */ freq : 1, /* use freq, not period */
__reserved_1 : 52; __reserved_1 : 53;
__u32 wakeup_events; /* wakeup every n events */ __u32 wakeup_events; /* wakeup every n events */
__u32 __reserved_2; __u32 __reserved_2;
...@@ -246,7 +245,6 @@ enum perf_event_type { ...@@ -246,7 +245,6 @@ enum perf_event_type {
* }; * };
*/ */
PERF_EVENT_MMAP = 1, PERF_EVENT_MMAP = 1,
PERF_EVENT_MUNMAP = 2,
/* /*
* struct { * struct {
...@@ -622,9 +620,6 @@ extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64); ...@@ -622,9 +620,6 @@ extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
extern void perf_counter_mmap(unsigned long addr, unsigned long len, extern void perf_counter_mmap(unsigned long addr, unsigned long len,
unsigned long pgoff, struct file *file); unsigned long pgoff, struct file *file);
extern void perf_counter_munmap(unsigned long addr, unsigned long len,
unsigned long pgoff, struct file *file);
extern void perf_counter_comm(struct task_struct *tsk); extern void perf_counter_comm(struct task_struct *tsk);
extern void perf_counter_fork(struct task_struct *tsk); extern void perf_counter_fork(struct task_struct *tsk);
...@@ -677,10 +672,6 @@ static inline void ...@@ -677,10 +672,6 @@ static inline void
perf_counter_mmap(unsigned long addr, unsigned long len, perf_counter_mmap(unsigned long addr, unsigned long len,
unsigned long pgoff, struct file *file) { } unsigned long pgoff, struct file *file) { }
static inline void
perf_counter_munmap(unsigned long addr, unsigned long len,
unsigned long pgoff, struct file *file) { }
static inline void perf_counter_comm(struct task_struct *tsk) { } static inline void perf_counter_comm(struct task_struct *tsk) { }
static inline void perf_counter_fork(struct task_struct *tsk) { } static inline void perf_counter_fork(struct task_struct *tsk) { }
static inline void perf_counter_init(void) { } static inline void perf_counter_init(void) { }
......
...@@ -41,7 +41,6 @@ static int perf_overcommit __read_mostly = 1; ...@@ -41,7 +41,6 @@ static int perf_overcommit __read_mostly = 1;
static atomic_t nr_counters __read_mostly; static atomic_t nr_counters __read_mostly;
static atomic_t nr_mmap_counters __read_mostly; static atomic_t nr_mmap_counters __read_mostly;
static atomic_t nr_munmap_counters __read_mostly;
static atomic_t nr_comm_counters __read_mostly; static atomic_t nr_comm_counters __read_mostly;
int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */ int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
...@@ -1448,8 +1447,6 @@ static void free_counter(struct perf_counter *counter) ...@@ -1448,8 +1447,6 @@ static void free_counter(struct perf_counter *counter)
atomic_dec(&nr_counters); atomic_dec(&nr_counters);
if (counter->attr.mmap) if (counter->attr.mmap)
atomic_dec(&nr_mmap_counters); atomic_dec(&nr_mmap_counters);
if (counter->attr.munmap)
atomic_dec(&nr_munmap_counters);
if (counter->attr.comm) if (counter->attr.comm)
atomic_dec(&nr_comm_counters); atomic_dec(&nr_comm_counters);
...@@ -2510,7 +2507,7 @@ static void perf_counter_fork_output(struct perf_counter *counter, ...@@ -2510,7 +2507,7 @@ static void perf_counter_fork_output(struct perf_counter *counter,
static int perf_counter_fork_match(struct perf_counter *counter) static int perf_counter_fork_match(struct perf_counter *counter)
{ {
if (counter->attr.comm || counter->attr.mmap || counter->attr.munmap) if (counter->attr.comm || counter->attr.mmap)
return 1; return 1;
return 0; return 0;
...@@ -2557,8 +2554,7 @@ void perf_counter_fork(struct task_struct *task) ...@@ -2557,8 +2554,7 @@ void perf_counter_fork(struct task_struct *task)
struct perf_fork_event fork_event; struct perf_fork_event fork_event;
if (!atomic_read(&nr_comm_counters) && if (!atomic_read(&nr_comm_counters) &&
!atomic_read(&nr_mmap_counters) && !atomic_read(&nr_mmap_counters))
!atomic_read(&nr_munmap_counters))
return; return;
fork_event = (struct perf_fork_event){ fork_event = (struct perf_fork_event){
...@@ -2722,12 +2718,7 @@ static void perf_counter_mmap_output(struct perf_counter *counter, ...@@ -2722,12 +2718,7 @@ static void perf_counter_mmap_output(struct perf_counter *counter,
static int perf_counter_mmap_match(struct perf_counter *counter, static int perf_counter_mmap_match(struct perf_counter *counter,
struct perf_mmap_event *mmap_event) struct perf_mmap_event *mmap_event)
{ {
if (counter->attr.mmap && if (counter->attr.mmap)
mmap_event->event.header.type == PERF_EVENT_MMAP)
return 1;
if (counter->attr.munmap &&
mmap_event->event.header.type == PERF_EVENT_MUNMAP)
return 1; return 1;
return 0; return 0;
...@@ -2821,27 +2812,6 @@ void perf_counter_mmap(unsigned long addr, unsigned long len, ...@@ -2821,27 +2812,6 @@ void perf_counter_mmap(unsigned long addr, unsigned long len,
perf_counter_mmap_event(&mmap_event); perf_counter_mmap_event(&mmap_event);
} }
void perf_counter_munmap(unsigned long addr, unsigned long len,
unsigned long pgoff, struct file *file)
{
struct perf_mmap_event mmap_event;
if (!atomic_read(&nr_munmap_counters))
return;
mmap_event = (struct perf_mmap_event){
.file = file,
.event = {
.header = { .type = PERF_EVENT_MUNMAP, },
.start = addr,
.len = len,
.pgoff = pgoff,
},
};
perf_counter_mmap_event(&mmap_event);
}
/* /*
* Log sample_period changes so that analyzing tools can re-normalize the * Log sample_period changes so that analyzing tools can re-normalize the
* event flow. * event flow.
...@@ -3525,8 +3495,6 @@ perf_counter_alloc(struct perf_counter_attr *attr, ...@@ -3525,8 +3495,6 @@ perf_counter_alloc(struct perf_counter_attr *attr,
atomic_inc(&nr_counters); atomic_inc(&nr_counters);
if (counter->attr.mmap) if (counter->attr.mmap)
atomic_inc(&nr_mmap_counters); atomic_inc(&nr_mmap_counters);
if (counter->attr.munmap)
atomic_inc(&nr_munmap_counters);
if (counter->attr.comm) if (counter->attr.comm)
atomic_inc(&nr_comm_counters); atomic_inc(&nr_comm_counters);
......
...@@ -1756,12 +1756,6 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) ...@@ -1756,12 +1756,6 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
do { do {
long nrpages = vma_pages(vma); long nrpages = vma_pages(vma);
if (vma->vm_flags & VM_EXEC) {
perf_counter_munmap(vma->vm_start,
nrpages << PAGE_SHIFT,
vma->vm_pgoff, vma->vm_file);
}
mm->total_vm -= nrpages; mm->total_vm -= nrpages;
vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
vma = remove_vma(vma); vma = remove_vma(vma);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册