/*
 * Copyright (C) 2001 Mike Corrigan IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/paca.h>
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/hv_call_event.h>
#include <asm/iseries/it_lp_naca.h>

/*
 * The LpQueue is used to pass event data from the hypervisor to
 * the partition. This is where I/O interrupt events are communicated.
 *
 * It is written to by the hypervisor so cannot end up in the BSS.
 */
struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));

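/* Per-cpu counts of events seen, indexed by event type; reported via /proc. */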
DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);

static char *event_types[HvLpEvent_Type_NumTypes] = {
        "Hypervisor",
        "Machine Facilities",
        "Session Manager",
        "SPD I/O",
        "Virtual Bus",
        "PCI I/O",
        "RIO I/O",
        "Virtual Lan",
        "Virtual I/O"
};

/* Array of LpEvent handler functions */
static LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];

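/*
 * Return the next valid event on the queue and advance the cursor past
 * it, wrapping to the start of the event stack when we run off the end.
 * Returns NULL if the next slot does not hold a valid event.
 */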
static struct HvLpEvent *get_next_hvlpevent(void)
{
        struct HvLpEvent *event;

        event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;

        if (hvlpevent_is_valid(event)) {
                /* rmb() needed only for weakly consistent machines (regatta) */
                rmb();
                /* Set pointer to next potential event */
                hvlpevent_queue.hq_current_event += ((event->xSizeMinus1 +
                                IT_LP_EVENT_ALIGN) / IT_LP_EVENT_ALIGN) *
                                        IT_LP_EVENT_ALIGN;

                /* Wrap to beginning if no room at end */
                if (hvlpevent_queue.hq_current_event >
                                hvlpevent_queue.hq_last_event) {
                        hvlpevent_queue.hq_current_event =
                                hvlpevent_queue.hq_event_stack;
                }
        } else {
                event = NULL;
        }

        return event;
}

static unsigned long spread_lpevents = NR_CPUS;

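/*
 * Returns non-zero if this CPU is allowed to process LP events and
 * there is either a valid event waiting on the queue or overflow
 * events are pending.
 */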
int hvlpevent_is_pending(void)
{
        struct HvLpEvent *next_event;

        if (smp_processor_id() >= spread_lpevents)
                return 0;

        next_event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;

        return hvlpevent_is_valid(next_event) ||
                hvlpevent_queue.hq_overflow_pending;
}

static void hvlpevent_clear_valid(struct HvLpEvent *event)
{
        /* Tell the Hypervisor that we're done with this event.
         * Also clear bits within this event that might look like valid bits,
         * ie. on 64-byte boundaries.
         */
        struct HvLpEvent *tmp;
        unsigned extra = ((event->xSizeMinus1 + IT_LP_EVENT_ALIGN) /
                                                IT_LP_EVENT_ALIGN) - 1;

        switch (extra) {
        case 3:
                tmp = (struct HvLpEvent *)((char *)event + 3 * IT_LP_EVENT_ALIGN);
                hvlpevent_invalidate(tmp);
                /* fall through */
        case 2:
                tmp = (struct HvLpEvent *)((char *)event + 2 * IT_LP_EVENT_ALIGN);
                hvlpevent_invalidate(tmp);
                /* fall through */
        case 1:
                tmp = (struct HvLpEvent *)((char *)event + 1 * IT_LP_EVENT_ALIGN);
                hvlpevent_invalidate(tmp);
        }

        mb();

        hvlpevent_invalidate(event);
}

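/*
 * Drain the event queue: dispatch each valid event to its registered
 * handler, then ask the hypervisor for any overflow events once the
 * queue itself is empty.
 */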
void process_hvlpevents(struct pt_regs *regs)
{
        struct HvLpEvent *event;

        /* If we have recursed, just return */
        if (!spin_trylock(&hvlpevent_queue.hq_lock))
                return;

        for (;;) {
                event = get_next_hvlpevent();
                if (event) {
                        /* Call the appropriate handler here, passing
                         * a pointer to the LpEvent. The handler
                         * must make a copy of the LpEvent if it
                         * needs it in a bottom half (perhaps for
                         * an ACK).
                         *
                         * Handlers are responsible for ACK processing.
                         *
                         * The Hypervisor guarantees that LpEvents will
                         * only be delivered with types that we have
                         * registered for, so no type check is necessary
                         * here!
                         */
                        if (event->xType < HvLpEvent_Type_NumTypes)
                                __get_cpu_var(hvlpevent_counts)[event->xType]++;
                        if (event->xType < HvLpEvent_Type_NumTypes &&
                                        lpEventHandler[event->xType])
                                lpEventHandler[event->xType](event, regs);
                        else
                                printk(KERN_INFO "Unexpected Lp Event type=%d\n",
                                                event->xType);

                        hvlpevent_clear_valid(event);
                } else if (hvlpevent_queue.hq_overflow_pending)
                        /*
                         * No more valid events. If overflow events are
                         * pending, process them.
                         */
                        HvCallEvent_getOverflowLpEvents(hvlpevent_queue.hq_index);
                else
                        break;
        }

        spin_unlock(&hvlpevent_queue.hq_lock);
}

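/*
 * 'spread_lpevents=N' boot parameter: restrict LP event processing to
 * the first N logical CPUs (the default, NR_CPUS, lets every CPU take
 * events).
 */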
static int set_spread_lpevents(char *str)
{
        unsigned long val = simple_strtoul(str, NULL, 0);

        /*
         * The parameter is the number of processors to share in processing
         * lp events.
         */
        if ((val > 0) && (val <= NR_CPUS)) {
                spread_lpevents = val;
                printk("lpevent processing spread over %ld processors\n", val);
        } else {
                printk("invalid spread_lpevents %ld\n", val);
        }

        return 1;
}
__setup("spread_lpevents=", set_spread_lpevents);

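/*
 * Allocate the LP event queue at boot and tell the hypervisor where to
 * deliver events for this partition.
 */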
void setup_hvlpevent_queue(void)
{
        void *eventStack;

        spin_lock_init(&hvlpevent_queue.hq_lock);

        /* Allocate a page for the Event Stack. */
        eventStack = alloc_bootmem_pages(IT_LP_EVENT_STACK_SIZE);
        memset(eventStack, 0, IT_LP_EVENT_STACK_SIZE);

        /* Invoke the hypervisor to initialize the event stack */
        HvCallEvent_setLpEventStack(0, eventStack, IT_LP_EVENT_STACK_SIZE);

        hvlpevent_queue.hq_event_stack = eventStack;
        hvlpevent_queue.hq_current_event = eventStack;
        hvlpevent_queue.hq_last_event = (char *)eventStack +
                (IT_LP_EVENT_STACK_SIZE - IT_LP_EVENT_MAX_SIZE);
        hvlpevent_queue.hq_index = 0;
}

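/*
 * Typical usage by an event consumer (a sketch; my_handler is a
 * hypothetical driver function):
 *
 *      HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo, my_handler);
 *      HvLpEvent_openPath(HvLpEvent_Type_VirtualIo, 0);
 *      ...
 *      HvLpEvent_closePath(HvLpEvent_Type_VirtualIo, 0);
 *      HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualIo);
 *
 * Unregistering fails while any path for the type is still open.
 */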
/* Register a handler for an LpEvent type */
int HvLpEvent_registerHandler(HvLpEvent_Type eventType, LpEventHandler handler)
{
        if (eventType < HvLpEvent_Type_NumTypes) {
                lpEventHandler[eventType] = handler;
                return 0;
        }
        return 1;
}
EXPORT_SYMBOL(HvLpEvent_registerHandler);

int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType)
{
        might_sleep();

        if (eventType < HvLpEvent_Type_NumTypes) {
                if (!lpEventHandlerPaths[eventType]) {
                        lpEventHandler[eventType] = NULL;
                        /*
                         * We now sleep until all other CPUs have scheduled.
                         * This ensures that the deletion is seen by all
                         * other CPUs, and that the deleted handler isn't
                         * still running on another CPU when we return.
                         */
                        synchronize_rcu();
                        return 0;
                }
        }
        return 1;
}
EXPORT_SYMBOL(HvLpEvent_unregisterHandler);

/*
 * lpIndex is the partition index of the target partition; it is only
 * needed for VirtualIo, VirtualLan and SessionMgr events. Zero means
 * use our own partition index, and is what the other types should pass.
 */
int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
{
        if ((eventType < HvLpEvent_Type_NumTypes) &&
                        lpEventHandler[eventType]) {
                if (lpIndex == 0)
                        lpIndex = itLpNaca.xLpIndex;
                HvCallEvent_openLpEventPath(lpIndex, eventType);
                ++lpEventHandlerPaths[eventType];
                return 0;
        }
        return 1;
}

int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
{
        if ((eventType < HvLpEvent_Type_NumTypes) &&
                        lpEventHandler[eventType] &&
                        lpEventHandlerPaths[eventType]) {
                if (lpIndex == 0)
                        lpIndex = itLpNaca.xLpIndex;
                HvCallEvent_closeLpEventPath(lpIndex, eventType);
                --lpEventHandlerPaths[eventType];
                return 0;
        }
        return 1;
}

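/* Report per-type and per-CPU event counts via /proc/iSeries/lpevents. */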
static int proc_lpevents_show(struct seq_file *m, void *v)
{
        int cpu, i;
        unsigned long sum;
        static unsigned long cpu_totals[NR_CPUS];

        /* FIXME: do we care that there's no locking here? */
        sum = 0;
        for_each_online_cpu(cpu) {
                cpu_totals[cpu] = 0;
                for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {
                        cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
                }
                sum += cpu_totals[cpu];
        }

        seq_printf(m, "LpEventQueue 0\n");
        seq_printf(m, "  events processed:\t%lu\n", sum);

        for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
                sum = 0;
                for_each_online_cpu(cpu) {
                        sum += per_cpu(hvlpevent_counts, cpu)[i];
                }

                seq_printf(m, "    %-20s %10lu\n", event_types[i], sum);
        }

        seq_printf(m, "\n  events processed by processor:\n");

        for_each_online_cpu(cpu) {
                seq_printf(m, "    CPU%02d  %10lu\n", cpu, cpu_totals[cpu]);
        }

        return 0;
}

static int proc_lpevents_open(struct inode *inode, struct file *file)
{
        return single_open(file, proc_lpevents_show, NULL);
}

static struct file_operations proc_lpevents_operations = {
        .open           = proc_lpevents_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init proc_lpevents_init(void)
{
        struct proc_dir_entry *e;

        e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL);
        if (e)
                e->proc_fops = &proc_lpevents_operations;

        return 0;
}
__initcall(proc_lpevents_init);