/* Path: arch/powerpc/platforms/iseries/lpevents.c */
/*1* Copyright (C) 2001 Mike Corrigan IBM Corporation2*3* This program is free software; you can redistribute it and/or modify4* it under the terms of the GNU General Public License as published by5* the Free Software Foundation; either version 2 of the License, or6* (at your option) any later version.7*/89#include <linux/stddef.h>10#include <linux/kernel.h>11#include <linux/sched.h>12#include <linux/bootmem.h>13#include <linux/seq_file.h>14#include <linux/proc_fs.h>15#include <linux/module.h>1617#include <asm/system.h>18#include <asm/paca.h>19#include <asm/firmware.h>20#include <asm/iseries/it_lp_queue.h>21#include <asm/iseries/hv_lp_event.h>22#include <asm/iseries/hv_call_event.h>23#include "it_lp_naca.h"2425/*26* The LpQueue is used to pass event data from the hypervisor to27* the partition. This is where I/O interrupt events are communicated.28*29* It is written to by the hypervisor so cannot end up in the BSS.30*/31struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));3233DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);3435static char *event_types[HvLpEvent_Type_NumTypes] = {36"Hypervisor",37"Machine Facilities",38"Session Manager",39"SPD I/O",40"Virtual Bus",41"PCI I/O",42"RIO I/O",43"Virtual Lan",44"Virtual I/O"45};4647/* Array of LpEvent handler functions */48static LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];49static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];5051static struct HvLpEvent * get_next_hvlpevent(void)52{53struct HvLpEvent * event;54event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;5556if (hvlpevent_is_valid(event)) {57/* rmb() needed only for weakly consistent machines (regatta) */58rmb();59/* Set pointer to next potential event */60hvlpevent_queue.hq_current_event += ((event->xSizeMinus1 +61IT_LP_EVENT_ALIGN) / IT_LP_EVENT_ALIGN) *62IT_LP_EVENT_ALIGN;6364/* Wrap to beginning if no room at end */65if (hvlpevent_queue.hq_current_event 
>66hvlpevent_queue.hq_last_event) {67hvlpevent_queue.hq_current_event =68hvlpevent_queue.hq_event_stack;69}70} else {71event = NULL;72}7374return event;75}7677static unsigned long spread_lpevents = NR_CPUS;7879int hvlpevent_is_pending(void)80{81struct HvLpEvent *next_event;8283if (smp_processor_id() >= spread_lpevents)84return 0;8586next_event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;8788return hvlpevent_is_valid(next_event) ||89hvlpevent_queue.hq_overflow_pending;90}9192static void hvlpevent_clear_valid(struct HvLpEvent * event)93{94/* Tell the Hypervisor that we're done with this event.95* Also clear bits within this event that might look like valid bits.96* ie. on 64-byte boundaries.97*/98struct HvLpEvent *tmp;99unsigned extra = ((event->xSizeMinus1 + IT_LP_EVENT_ALIGN) /100IT_LP_EVENT_ALIGN) - 1;101102switch (extra) {103case 3:104tmp = (struct HvLpEvent*)((char*)event + 3 * IT_LP_EVENT_ALIGN);105hvlpevent_invalidate(tmp);106case 2:107tmp = (struct HvLpEvent*)((char*)event + 2 * IT_LP_EVENT_ALIGN);108hvlpevent_invalidate(tmp);109case 1:110tmp = (struct HvLpEvent*)((char*)event + 1 * IT_LP_EVENT_ALIGN);111hvlpevent_invalidate(tmp);112}113114mb();115116hvlpevent_invalidate(event);117}118119void process_hvlpevents(void)120{121struct HvLpEvent * event;122123restart:124/* If we have recursed, just return */125if (!spin_trylock(&hvlpevent_queue.hq_lock))126return;127128for (;;) {129event = get_next_hvlpevent();130if (event) {131/* Call appropriate handler here, passing132* a pointer to the LpEvent. The handler133* must make a copy of the LpEvent if it134* needs it in a bottom half. 
(perhaps for135* an ACK)136*137* Handlers are responsible for ACK processing138*139* The Hypervisor guarantees that LpEvents will140* only be delivered with types that we have141* registered for, so no type check is necessary142* here!143*/144if (event->xType < HvLpEvent_Type_NumTypes)145__get_cpu_var(hvlpevent_counts)[event->xType]++;146if (event->xType < HvLpEvent_Type_NumTypes &&147lpEventHandler[event->xType])148lpEventHandler[event->xType](event);149else {150u8 type = event->xType;151152/*153* Don't printk in the spinlock as printk154* may require ack events form the HV to send155* any characters there.156*/157hvlpevent_clear_valid(event);158spin_unlock(&hvlpevent_queue.hq_lock);159printk(KERN_INFO160"Unexpected Lp Event type=%d\n", type);161goto restart;162}163164hvlpevent_clear_valid(event);165} else if (hvlpevent_queue.hq_overflow_pending)166/*167* No more valid events. If overflow events are168* pending process them169*/170HvCallEvent_getOverflowLpEvents(hvlpevent_queue.hq_index);171else172break;173}174175spin_unlock(&hvlpevent_queue.hq_lock);176}177178static int set_spread_lpevents(char *str)179{180unsigned long val = simple_strtoul(str, NULL, 0);181182/*183* The parameter is the number of processors to share in processing184* lp events.185*/186if (( val > 0) && (val <= NR_CPUS)) {187spread_lpevents = val;188printk("lpevent processing spread over %ld processors\n", val);189} else {190printk("invalid spread_lpevents %ld\n", val);191}192193return 1;194}195__setup("spread_lpevents=", set_spread_lpevents);196197void __init setup_hvlpevent_queue(void)198{199void *eventStack;200201spin_lock_init(&hvlpevent_queue.hq_lock);202203/* Allocate a page for the Event Stack. 
*/204eventStack = alloc_bootmem_pages(IT_LP_EVENT_STACK_SIZE);205memset(eventStack, 0, IT_LP_EVENT_STACK_SIZE);206207/* Invoke the hypervisor to initialize the event stack */208HvCallEvent_setLpEventStack(0, eventStack, IT_LP_EVENT_STACK_SIZE);209210hvlpevent_queue.hq_event_stack = eventStack;211hvlpevent_queue.hq_current_event = eventStack;212hvlpevent_queue.hq_last_event = (char *)eventStack +213(IT_LP_EVENT_STACK_SIZE - IT_LP_EVENT_MAX_SIZE);214hvlpevent_queue.hq_index = 0;215}216217/* Register a handler for an LpEvent type */218int HvLpEvent_registerHandler(HvLpEvent_Type eventType, LpEventHandler handler)219{220if (eventType < HvLpEvent_Type_NumTypes) {221lpEventHandler[eventType] = handler;222return 0;223}224return 1;225}226EXPORT_SYMBOL(HvLpEvent_registerHandler);227228int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType)229{230might_sleep();231232if (eventType < HvLpEvent_Type_NumTypes) {233if (!lpEventHandlerPaths[eventType]) {234lpEventHandler[eventType] = NULL;235/*236* We now sleep until all other CPUs have scheduled.237* This ensures that the deletion is seen by all238* other CPUs, and that the deleted handler isn't239* still running on another CPU when we return.240*/241synchronize_sched();242return 0;243}244}245return 1;246}247EXPORT_SYMBOL(HvLpEvent_unregisterHandler);248249/*250* lpIndex is the partition index of the target partition.251* needed only for VirtualIo, VirtualLan and SessionMgr. 
Zero252* indicates to use our partition index - for the other types.253*/254int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex)255{256if ((eventType < HvLpEvent_Type_NumTypes) &&257lpEventHandler[eventType]) {258if (lpIndex == 0)259lpIndex = itLpNaca.xLpIndex;260HvCallEvent_openLpEventPath(lpIndex, eventType);261++lpEventHandlerPaths[eventType];262return 0;263}264return 1;265}266267int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex)268{269if ((eventType < HvLpEvent_Type_NumTypes) &&270lpEventHandler[eventType] &&271lpEventHandlerPaths[eventType]) {272if (lpIndex == 0)273lpIndex = itLpNaca.xLpIndex;274HvCallEvent_closeLpEventPath(lpIndex, eventType);275--lpEventHandlerPaths[eventType];276return 0;277}278return 1;279}280281static int proc_lpevents_show(struct seq_file *m, void *v)282{283int cpu, i;284unsigned long sum;285static unsigned long cpu_totals[NR_CPUS];286287/* FIXME: do we care that there's no locking here? */288sum = 0;289for_each_online_cpu(cpu) {290cpu_totals[cpu] = 0;291for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {292cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];293}294sum += cpu_totals[cpu];295}296297seq_printf(m, "LpEventQueue 0\n");298seq_printf(m, " events processed:\t%lu\n", sum);299300for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {301sum = 0;302for_each_online_cpu(cpu) {303sum += per_cpu(hvlpevent_counts, cpu)[i];304}305306seq_printf(m, " %-20s %10lu\n", event_types[i], sum);307}308309seq_printf(m, "\n events processed by processor:\n");310311for_each_online_cpu(cpu) {312seq_printf(m, " CPU%02d %10lu\n", cpu, cpu_totals[cpu]);313}314315return 0;316}317318static int proc_lpevents_open(struct inode *inode, struct file *file)319{320return single_open(file, proc_lpevents_show, NULL);321}322323static const struct file_operations proc_lpevents_operations = {324.open = proc_lpevents_open,325.read = seq_read,326.llseek = seq_lseek,327.release = single_release,328};329330static int __init 
proc_lpevents_init(void)331{332if (!firmware_has_feature(FW_FEATURE_ISERIES))333return 0;334335proc_create("iSeries/lpevents", S_IFREG|S_IRUGO, NULL,336&proc_lpevents_operations);337return 0;338}339__initcall(proc_lpevents_init);340341342343