/*
 * Copyright (C) 2001 Mike Corrigan  IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/paca.h>
#include <asm/iSeries/ItLpQueue.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/HvCallEvent.h>
#include <asm/iSeries/ItLpNaca.h>
/*
 * The LpQueue is used to pass event data from the hypervisor to
 * the partition.  This is where I/O interrupt events are communicated.
 *
 * It is written to by the hypervisor so cannot end up in the BSS:
 * force it into the initialized .data section instead.
 */
struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));

/* Per-CPU count of events processed, indexed by HvLpEvent type;
 * reported via /proc/iSeries/lpevents. */
DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);

/* Human-readable names for each HvLpEvent type, one entry per
 * HvLpEvent_Type value; used only for /proc output. */
static char *event_types[HvLpEvent_Type_NumTypes] = {
	"Hypervisor",
	"Machine Facilities",
	"Session Manager",
	"SPD I/O",
	"Virtual Bus",
	"PCI I/O",
	"RIO I/O",
	"Virtual Lan",
	"Virtual I/O"
};

/* Array of LpEvent handler functions, indexed by event type */
static LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
/* Number of open event paths per type; a nonzero count blocks
 * handler unregistration (see HvLpEvent_unregisterHandler). */
static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
L
Linus Torvalds 已提交
50

51
static struct HvLpEvent * get_next_hvlpevent(void)
L
Linus Torvalds 已提交
52
{
53 54 55 56
	struct HvLpEvent * event;
	event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;

	if (event->xFlags.xValid) {
L
Linus Torvalds 已提交
57 58 59
		/* rmb() needed only for weakly consistent machines (regatta) */
		rmb();
		/* Set pointer to next potential event */
60 61 62
		hvlpevent_queue.xSlicCurEventPtr += ((event->xSizeMinus1 +
				LpEventAlign) / LpEventAlign) * LpEventAlign;

L
Linus Torvalds 已提交
63
		/* Wrap to beginning if no room at end */
64 65 66 67 68 69 70
		if (hvlpevent_queue.xSlicCurEventPtr >
				hvlpevent_queue.xSlicLastValidEventPtr) {
			hvlpevent_queue.xSlicCurEventPtr =
				hvlpevent_queue.xSlicEventStackPtr;
		}
	} else {
		event = NULL;
L
Linus Torvalds 已提交
71 72
	}

73
	return event;
L
Linus Torvalds 已提交
74 75
}

/* Number of CPUs that share in processing lp events; settable via the
 * "spread_lpevents=" boot parameter, defaults to all CPUs. */
static unsigned long spread_lpevents = NR_CPUS;

78
int hvlpevent_is_pending(void)
L
Linus Torvalds 已提交
79
{
80 81 82 83 84
	struct HvLpEvent *next_event;

	if (smp_processor_id() >= spread_lpevents)
		return 0;

85
	next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
86 87 88

	return next_event->xFlags.xValid |
		hvlpevent_queue.xPlicOverflowIntPending;
L
Linus Torvalds 已提交
89 90
}

/*
 * Mark a processed event as consumed by clearing its valid flag, plus
 * any bytes inside the event that could be mistaken for valid flags.
 */
static void hvlpevent_clear_valid(struct HvLpEvent * event)
{
	/* Tell the Hypervisor that we're done with this event.
	 * Also clear bits within this event that might look like valid bits.
	 * ie. on 64-byte boundaries.
	 */
	struct HvLpEvent *tmp;
	unsigned extra = ((event->xSizeMinus1 + LpEventAlign) /
						 LpEventAlign) - 1;

	/* extra is the number of LpEventAlign-sized chunks the event spans
	 * beyond the first; clear the would-be valid bit at each chunk
	 * boundary, largest offset first. Fallthrough is intentional. */
	switch (extra) {
	case 3:
		tmp = (struct HvLpEvent*)((char*)event + 3 * LpEventAlign);
		tmp->xFlags.xValid = 0;
		/* fall through */
	case 2:
		tmp = (struct HvLpEvent*)((char*)event + 2 * LpEventAlign);
		tmp->xFlags.xValid = 0;
		/* fall through */
	case 1:
		tmp = (struct HvLpEvent*)((char*)event + 1 * LpEventAlign);
		tmp->xFlags.xValid = 0;
	}

	/* Ensure the trailing clears are visible before the header's valid
	 * bit is cleared, so the event is never seen partially consumed. */
	mb();

	event->xFlags.xValid = 0;
}

/*
 * Drain the hypervisor event queue, dispatching each valid event to the
 * handler registered for its type, then processing any pending overflow
 * events.  A trylock makes recursive invocations return immediately.
 */
void process_hvlpevents(struct pt_regs *regs)
{
	struct HvLpEvent * event;

	/* If we have recursed, just return */
	if (!spin_trylock(&hvlpevent_queue.lock))
		return;

	for (;;) {
		event = get_next_hvlpevent();
		if (event) {
			/* Call appropriate handler here, passing
			 * a pointer to the LpEvent.  The handler
			 * must make a copy of the LpEvent if it
			 * needs it in a bottom half. (perhaps for
			 * an ACK)
			 *
			 *  Handlers are responsible for ACK processing
			 *
			 * The Hypervisor guarantees that LpEvents will
			 * only be delivered with types that we have
			 * registered for, so no type check is necessary
			 * here!
			 */
			if (event->xType < HvLpEvent_Type_NumTypes)
				__get_cpu_var(hvlpevent_counts)[event->xType]++;
			if (event->xType < HvLpEvent_Type_NumTypes &&
					lpEventHandler[event->xType])
				lpEventHandler[event->xType](event, regs);
			else
				printk(KERN_INFO "Unexpected Lp Event type=%d\n", event->xType );

			hvlpevent_clear_valid(event);
		} else if (hvlpevent_queue.xPlicOverflowIntPending)
			/*
			 * No more valid events. If overflow events are
			 * pending process them
			 */
			HvCallEvent_getOverflowLpEvents(hvlpevent_queue.xIndex);
		else
			break;
	}

	spin_unlock(&hvlpevent_queue.lock);
}
163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182

static int set_spread_lpevents(char *str)
{
	unsigned long val = simple_strtoul(str, NULL, 0);

	/*
	 * The parameter is the number of processors to share in processing
	 * lp events.
	 */
	if (( val > 0) && (val <= NR_CPUS)) {
		spread_lpevents = val;
		printk("lpevent processing spread over %ld processors\n", val);
	} else {
		printk("invalid spread_lpevents %ld\n", val);
	}

	return 1;
}
__setup("spread_lpevents=", set_spread_lpevents);

/*
 * Allocate and register the hypervisor event stack (the LpEvent queue)
 * and initialize the queue's cursor pointers.  Called once at boot.
 */
void setup_hvlpevent_queue(void)
{
	void *eventStack;

	/*
	 * Allocate a page for the Event Stack. The Hypervisor needs the
	 * absolute real address, so we subtract out the KERNELBASE and add
	 * in the absolute real address of the kernel load area.
	 * NOTE(review): no such subtraction is visible here - presumably the
	 * virtual-to-real translation happens inside
	 * HvCallEvent_setLpEventStack; confirm against that helper.
	 */
	eventStack = alloc_bootmem_pages(LpEventStackSize);
	memset(eventStack, 0, LpEventStackSize);

	/* Invoke the hypervisor to initialize the event stack */
	HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);

	/* Both cursors start at the stack base; the last-valid pointer
	 * leaves room for one maximum-sized event at the end. */
	hvlpevent_queue.xSlicEventStackPtr = (char *)eventStack;
	hvlpevent_queue.xSlicCurEventPtr = (char *)eventStack;
	hvlpevent_queue.xSlicLastValidEventPtr = (char *)eventStack +
					(LpEventStackSize - LpEventMaxSize);
	hvlpevent_queue.xIndex = 0;
}
204

205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268
/* Register a handler for an LpEvent type */
int HvLpEvent_registerHandler(HvLpEvent_Type eventType, LpEventHandler handler)
{
	if (eventType < HvLpEvent_Type_NumTypes) {
		lpEventHandler[eventType] = handler;
		return 0;
	}
	return 1;
}
EXPORT_SYMBOL(HvLpEvent_registerHandler);

/*
 * Remove the handler for an LpEvent type.  Fails (returns 1) if the
 * type is out of range or any event path for it is still open.
 * May sleep; must not be called from atomic context.
 */
int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType)
{
	might_sleep();

	if (eventType >= HvLpEvent_Type_NumTypes)
		return 1;
	if (lpEventHandlerPaths[eventType])
		return 1;

	lpEventHandler[eventType] = NULL;
	/*
	 * We now sleep until all other CPUs have scheduled.
	 * This ensures that the deletion is seen by all
	 * other CPUs, and that the deleted handler isn't
	 * still running on another CPU when we return.
	 */
	synchronize_rcu();
	return 0;
}
EXPORT_SYMBOL(HvLpEvent_unregisterHandler);

/*
 * lpIndex is the partition index of the target partition.
 * needed only for VirtualIo, VirtualLan and SessionMgr.  Zero
 * indicates to use our partition index - for the other types.
 *
 * Returns 0 on success, 1 if the type is invalid or has no handler.
 */
int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
{
	if ((eventType >= HvLpEvent_Type_NumTypes) ||
			!lpEventHandler[eventType])
		return 1;

	if (lpIndex == 0)
		lpIndex = itLpNaca.xLpIndex;
	HvCallEvent_openLpEventPath(lpIndex, eventType);
	++lpEventHandlerPaths[eventType];
	return 0;
}

/*
 * Close an event path previously opened with HvLpEvent_openPath.
 * Returns 0 on success, 1 if the type is invalid, has no handler,
 * or has no open path to close.
 */
int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
{
	if ((eventType >= HvLpEvent_Type_NumTypes) ||
			!lpEventHandler[eventType] ||
			!lpEventHandlerPaths[eventType])
		return 1;

	if (lpIndex == 0)
		lpIndex = itLpNaca.xLpIndex;
	HvCallEvent_closeLpEventPath(lpIndex, eventType);
	--lpEventHandlerPaths[eventType];
	return 0;
}

269 270
static int proc_lpevents_show(struct seq_file *m, void *v)
{
271 272 273 274 275 276 277 278 279 280 281 282 283
	int cpu, i;
	unsigned long sum;
	static unsigned long cpu_totals[NR_CPUS];

	/* FIXME: do we care that there's no locking here? */
	sum = 0;
	for_each_online_cpu(cpu) {
		cpu_totals[cpu] = 0;
		for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {
			cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
		}
		sum += cpu_totals[cpu];
	}
284 285

	seq_printf(m, "LpEventQueue 0\n");
286
	seq_printf(m, "  events processed:\t%lu\n", sum);
287

288 289 290 291 292 293
	for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
		sum = 0;
		for_each_online_cpu(cpu) {
			sum += per_cpu(hvlpevent_counts, cpu)[i];
		}

294
		seq_printf(m, "    %-20s %10lu\n", event_types[i], sum);
295
	}
296 297 298

	seq_printf(m, "\n  events processed by processor:\n");

299 300 301
	for_each_online_cpu(cpu) {
		seq_printf(m, "    CPU%02d  %10lu\n", cpu, cpu_totals[cpu]);
	}
302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329

	return 0;
}

/* open() for /proc/iSeries/lpevents: single-shot seq_file output. */
static int proc_lpevents_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_lpevents_show, NULL);
}

/* file_operations for the read-only /proc/iSeries/lpevents entry. */
static struct file_operations proc_lpevents_operations = {
	.open		= proc_lpevents_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Create the /proc/iSeries/lpevents entry at boot.  Registration
 * failure is silently tolerated (the entry just won't exist). */
static int __init proc_lpevents_init(void)
{
	struct proc_dir_entry *entry;

	entry = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL);
	if (entry)
		entry->proc_fops = &proc_lpevents_operations;

	return 0;
}
__initcall(proc_lpevents_init);