#include <linux/cpuhotplug.h>
#include <linux/sched/task.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/pfault.h>
#include <asm/diag.h>
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000UL
static int pfault_disable;
/*
 * Early parameter handler for "nopfault": mark the pfault facility as
 * disabled so __pfault_init()/__pfault_fini() become no-ops.
 */
static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}
early_param("nopfault", nopfault);
/*
 * Parameter block ("refbk") handed to DIAGNOSE 0x258. The layout is
 * fixed by the diagnose interface: four halfwords of header followed
 * by four doublewords -- 40 bytes, i.e. 5 doublewords (see refdwlen).
 */
struct pfault_refbk {
	u16 refdiagc;	/* diagnose code; always 0x258 here */
	u16 reffcode;	/* function code: 0 = init, 1 = fini (see refbks below) */
	u16 refdwlen;	/* length of this block in doublewords */
	u16 refversn;	/* interface version */
	u64 refgaddr;	/* token address; init points it at the lowcore LPP field */
	u64 refselmk;	/* select mask -- TODO confirm exact hardware semantics */
	u64 refcmpmk;	/* compare mask -- TODO confirm exact hardware semantics */
	u64 reserved;	/* init sets __PF_RES_FIELD, fini leaves it zero */
};
/*
 * Refbk used by __pfault_init() to enable pfault (function code 0).
 * refgaddr names the lowcore LPP field: the completion interrupt then
 * delivers the pid there, which pfault_interrupt() extracts from
 * param64 with LPP_PID_MASK.
 */
static struct pfault_refbk pfault_init_refbk = {
	.refdiagc = 0x258,
	.reffcode = 0,			/* 0 = init request */
	.refdwlen = 5,			/* 5 doublewords = sizeof(struct pfault_refbk) */
	.refversn = 2,
	.refgaddr = __LC_LPP,		/* token = lowcore LPP field */
	.refselmk = 1UL << 48,
	.refcmpmk = 1UL << 48,
	.reserved = __PF_RES_FIELD
};
/*
 * Enable the pfault (pseudo-page-fault) facility via DIAGNOSE 0x258,
 * function code 0.
 *
 * Returns the diagnose's result code, or -EOPNOTSUPP when pfault was
 * disabled on the command line or the diagnose is unavailable.
 */
int __pfault_init(void)
{
	int rc = -EOPNOTSUPP;

	if (pfault_disable)
		return rc;
	diag_stat_inc(DIAG_STAT_X258);
	/*
	 * If the diagnose faults (facility not provided), the exception
	 * table entry resumes at label 0 and rc keeps its -EOPNOTSUPP
	 * initial value; otherwise the diag instruction updates rc.
	 */
	asm_inline volatile(
		" diag %[refbk],%[rc],0x258\n"
		"0: nopr %%r7\n"
		EX_TABLE(0b, 0b)
		: [rc] "+d" (rc)
		: [refbk] "a" (&pfault_init_refbk), "m" (pfault_init_refbk)
		: "cc");
	return rc;
}
/*
 * Refbk used by __pfault_fini() to disable pfault (function code 1).
 * The remaining fields are left zero-initialized.
 */
static struct pfault_refbk pfault_fini_refbk = {
	.refdiagc = 0x258,
	.reffcode = 1,			/* 1 = fini request */
	.refdwlen = 5,
	.refversn = 2,
};
/*
 * Disable the pfault facility via DIAGNOSE 0x258, function code 1.
 * No result is checked; a faulting diagnose is silently skipped via
 * the exception table entry.
 */
void __pfault_fini(void)
{
	if (pfault_disable)
		return;
	diag_stat_inc(DIAG_STAT_X258);
	asm_inline volatile(
		" diag %[refbk],0,0x258\n"
		"0: nopr %%r7\n"
		EX_TABLE(0b, 0b)
		:
		: [refbk] "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk)
		: "cc");
}
/* Protects pfault_list and the tasks' pfault_wait state. */
static DEFINE_SPINLOCK(pfault_lock);
/* Tasks currently sleeping on an outstanding pfault initial interrupt. */
static LIST_HEAD(pfault_list);

/* Subcode bit: set for completion interrupts, clear for initial ones. */
#define PF_COMPLETE 0x0080
/*
 * External interrupt handler for pfault initial and completion
 * interrupts. The initial interrupt (PF_COMPLETE clear) puts the
 * current task to sleep; the completion interrupt (PF_COMPLETE set)
 * wakes it again. The two can race in either order, which is what the
 * pfault_wait tri-state (0 / 1 / -1) disentangles.
 */
static void pfault_interrupt(struct ext_code ext_code,
		unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/* Only subcodes matching __SUBCODE_MASK in the high byte are ours. */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* The token is the pid of the affected task (see pfault_init_refbk). */
	pid = param64 & LPP_PID_MASK;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);	/* lookup reference, dropped at the end */
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & PF_COMPLETE) {
		/* Completion: the missing page has been resolved. */
		if (tsk->thread.pfault_wait == 1) {
			/*
			 * Initial interrupt arrived first; the task is on
			 * the wait list. Clear the state, unlink and wake
			 * it, and drop the list reference taken when it
			 * was queued.
			 */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/*
			 * Completion beat the initial interrupt. Set
			 * pfault_wait to -1 so the initial interrupt does
			 * not put the task to sleep. If the task is not
			 * running, ignore the completion -- presumably a
			 * leftover from an earlier cancel; TODO confirm.
			 */
			if (task_is_running(tsk))
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* Initial interrupt: a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already queued with a reference: just block. */
			goto block;
		} else if (tsk->thread.pfault_wait == -1) {
			/*
			 * Completion already arrived (pfault_wait == -1);
			 * reset the state and keep running.
			 */
			tsk->thread.pfault_wait = 0;
		} else {
			/*
			 * Normal order: queue the task and let it sleep.
			 * An extra reference is needed because another cpu
			 * may set the task back to TASK_RUNNING before the
			 * scheduler is reached.
			 */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
block:
			/*
			 * Set the state and request a reschedule; the task
			 * actually blocks on the way back from the
			 * interrupt.
			 */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
			set_preempt_need_resched();
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);	/* drop the lookup reference from above */
}
/*
 * CPU hotplug "dead" callback: drain the pfault wait list, waking
 * every task still parked on it so nothing stays blocked forever.
 * Always returns 0.
 */
static int pfault_cpu_dead(unsigned int cpu)
{
	struct thread_struct *thrd, *tmp;
	struct task_struct *owner;

	spin_lock_irq(&pfault_lock);
	list_for_each_entry_safe(thrd, tmp, &pfault_list, list) {
		/* Clear the wait state before waking so the task does not
		 * go back to sleep on a stale flag. */
		thrd->pfault_wait = 0;
		list_del(&thrd->list);
		owner = container_of(thrd, struct task_struct, thread);
		wake_up_process(owner);
		/* Drop the reference taken when the task was queued. */
		put_task_struct(owner);
	}
	spin_unlock_irq(&pfault_lock);
	return 0;
}
/*
 * Early-boot setup: register the pfault external interrupt handler,
 * enable the facility, and hook CPU-hotplug cleanup. On any failure
 * everything is rolled back and pfault is disabled for good.
 */
static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	if (pfault_init() != 0) {
		rc = -EOPNOTSUPP;
		goto out_pfault;
	}
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
				  NULL, pfault_cpu_dead);
	return 0;
out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);