// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) IBM Corporation, 2011
 *
 * Authors: Sukadev Bhattiprolu <[email protected]>
 *          Anton Blanchard <[email protected]>
 */
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <asm/switch_to.h>

int enter_vmx_usercopy(void)
{
	/*
	 * The VMX path cannot be used from interrupt context; returning 0
	 * tells the caller to fall back to the scalar copy.
	 */
	if (in_interrupt())
		return 0;

	preempt_disable();
	/*
	 * We need to disable page faults as they can call schedule and
	 * thus make us lose the VMX context. So on page faults, we just
	 * fail which will cause a fallback to the normal non-vmx copy.
	 */
	pagefault_disable();

	enable_kernel_altivec();

	return 1;
}

/*
 * This function must return 0 because we tail call optimise when calling
 * from __copy_tofrom_user_power7 which returns 0 on success.
 */
int exit_vmx_usercopy(void)
{
	disable_kernel_altivec();
	pagefault_enable();
	preempt_enable_no_resched();
	/*
	 * Must never explicitly call schedule (including preempt_enable())
	 * while in a kuap-unlocked user copy, because the AMR register will
	 * not be saved and restored across context switch. However preempt
	 * kernels need to be preempted as soon as possible if need_resched is
	 * set and we are preemptible. The hack here is to schedule a
	 * decrementer to fire here and reschedule for us if necessary.
	 */
	if (need_irq_preemption() && need_resched())
		set_dec(1);
	return 0;
}

/*
 * Like enter_vmx_usercopy(), but for copies between kernel buffers, which
 * do not take recoverable page faults, so page faults are left enabled.
 */
int enter_vmx_ops(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();

	enable_kernel_altivec();

	return 1;
}

/*
 * All calls to this function will be optimised into tail calls. We are
 * passed a pointer to the destination which we return as required by a
 * memcpy implementation.
 */
void *exit_vmx_ops(void *dest)
{
	disable_kernel_altivec();
	preempt_enable();
	return dest;
}
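
/*
 * Illustrative sketch (not part of this file): the helpers above are
 * called from powerpc assembly such as __copy_tofrom_user_power7 and the
 * power7 memcpy, but the calling convention they expect, rendered in C,
 * looks roughly like the following. vmx_copy_loop() and
 * scalar_memcpy_fallback() are hypothetical stand-ins for the real copy
 * bodies.
 */
#if 0	/* sketch only, not compiled */
void *memcpy_power7_sketch(void *dest, const void *src, unsigned long n)
{
	if (!enter_vmx_ops())
		/* VMX unavailable (e.g. interrupt context): plain copy */
		return scalar_memcpy_fallback(dest, src, n);

	vmx_copy_loop(dest, src, n);	/* hypothetical VMX register copy */

	/*
	 * exit_vmx_ops() returns its argument, so this satisfies memcpy's
	 * return-the-destination contract and can be compiled as a tail
	 * call, as the comment above exit_vmx_ops() describes.
	 */
	return exit_vmx_ops(dest);
}
#endif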