internal.h 4.1 KB
Newer Older
1 2 3
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

4
#include <linux/hardirq.h>
5
#include <linux/uaccess.h>
6 7 8

/* Buffer handling */

9 10 11 12 13 14 15 16 17 18
#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
19
	int				overwrite;	/* can overwrite itself */
20 21 22 23 24 25 26 27 28 29

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
30 31 32
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;
33

34 35
	atomic_t			mmap_count;
	unsigned long			mmap_locked;
P
Peter Zijlstra 已提交
36 37
	struct user_struct		*mmap_user;

38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78
	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};

extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

79
static inline unsigned long perf_data_size(struct ring_buffer *rb)
80 81 82 83
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned int						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned int len)				\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min_t(unsigned long, handle->size, len);		\
									\
		written = memcpy_func(handle->addr, buf, size);		\
									\
		len -= written;						\
		handle->addr += written;				\
		buf += written;						\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

static inline int memcpy_common(void *dst, const void *src, size_t n)
114
{
115 116
	memcpy(dst, src, n);
	return n;
117 118
}

119 120
DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

121 122 123 124
#define MEMCPY_SKIP(dst, src, n) (n)

DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)

125 126 127 128 129 130
#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user __copy_from_user_inatomic
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

131
/* Callchain handling */
132 133
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180
#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

181
#endif /* _KERNEL_EVENTS_INTERNAL_H */