#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

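/*
 * Ring buffer backing a perf_event mmap() area: a control page
 * (user_page) shared with user space, followed by nr_pages data
 * pages, plus an optional AUX area.  Freed via RCU once the last
 * reference is dropped.
 */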
struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
	struct irq_work			irq_work;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	local_t				aux_head;
	local_t				aux_nest;
	local_t				aux_wakeup;
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};

extern void rb_free(struct ring_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct ring_buffer *rb;

	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
}

extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}

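/*
 * DEFINE_OUTPUT_COPY() generates a copy routine that feeds @len bytes
 * from @buf into the output handle, advancing to the next data page
 * whenever the current one fills up.  @memcpy_func must return the
 * number of bytes it failed to copy (0 on success); the generated
 * function returns the number of bytes left uncopied.
 */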
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size    = min(handle->size, len);			\
		written = memcpy_func(handle->addr, buf, size);		\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		buf += written;						\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

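/*
 * memcpy_skip() "copies" nothing and reports full success, so
 * __output_skip() merely advances the handle by @len bytes.
 */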
static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

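/*
 * Default user-space copy for __output_copy_user(): copy with page
 * faults disabled so it can run in atomic context, returning the
 * number of bytes that could not be copied.  Architectures may
 * provide their own arch_perf_out_copy_user instead.
 */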
#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

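/*
 * Track one recursion counter per context: task (0), softirq (1),
 * hardirq (2) and NMI (3).  Returns the context index after marking
 * it in use, or -1 if an event is already being handled in that
 * context.
 */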
static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

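/*
 * Architectures that select HAVE_PERF_USER_STACK_DUMP provide
 * user_stack_pointer(), allowing perf to sample the user stack;
 * otherwise perf_user_stack_pointer() evaluates to 0.
 */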
#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */