Path: blob/master/arch/powerpc/platforms/cell/spufs/run.c
// SPDX-License-Identifier: GPL-2.0
#define DEBUG

#include <linux/wait.h>
#include <linux/ptrace.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "spufs.h"

/* interrupt-level stop callback function. */
void spufs_stop_callback(struct spu *spu, int irq)
{
	struct spu_context *ctx = spu->ctx;

	/*
	 * It should be impossible to preempt a context while an exception
	 * is being processed, since the context switch code is specially
	 * coded to deal with interrupts ... But, just in case, sanity check
	 * the context pointer. It is OK to return doing nothing since
	 * the exception will be regenerated when the context is resumed.
	 */
	if (ctx) {
		/* Copy exception arguments into module specific structure */
		switch (irq) {
		case 0:
			ctx->csa.class_0_pending = spu->class_0_pending;
			ctx->csa.class_0_dar = spu->class_0_dar;
			break;
		case 1:
			ctx->csa.class_1_dsisr = spu->class_1_dsisr;
			ctx->csa.class_1_dar = spu->class_1_dar;
			break;
		case 2:
			break;
		}

		/* ensure that the exception status has hit memory before a
		 * thread waiting on the context's stop queue is woken */
		smp_wmb();

		wake_up_all(&ctx->stop_wq);
	}
}

int spu_stopped(struct spu_context *ctx, u32 *stat)
{
	u64 dsisr;
	u32 stopped;

	stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
		SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;

top:
	*stat = ctx->ops->status_read(ctx);
	if (*stat & stopped) {
		/*
		 * If the spu hasn't finished stopping, we need to
		 * re-read the register to get the stopped value.
		 */
		if (*stat & SPU_STATUS_RUNNING)
			goto top;
		return 1;
	}

	if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
		return 1;

	dsisr = ctx->csa.class_1_dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
		return 1;

	if (ctx->csa.class_0_pending)
		return 1;

	return 0;
}
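
/*
 * Bring the SPE into isolated mode by running the isolated loader.
 * Summarizing the steps below: purge the MFC DMA queue so no transfer
 * is in flight, drop the SPE into kernel (privileged) mode so the
 * loader is accessible, pass the loader's address through the two
 * signal notification registers, then request isolation through the
 * run control register and wait for the load status to clear.
 */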
static int spu_setup_isolated(struct spu_context *ctx)
{
	int ret;
	u64 __iomem *mfc_cntl;
	u64 sr1;
	u32 status;
	unsigned long timeout;
	const u32 status_loading = SPU_STATUS_RUNNING
		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;

	ret = -ENODEV;
	if (!isolated_loader)
		goto out;

	/*
	 * We need to exclude userspace access to the context.
	 *
	 * To protect against memory access we invalidate all ptes
	 * and make sure the pagefault handlers block on the mutex.
	 */
	spu_unmap_mappings(ctx);

	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;

	/* purge the MFC DMA queue to ensure no spurious accesses before we
	 * enter kernel mode */
	timeout = jiffies + HZ;
	out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
	while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
			!= MFC_CNTL_PURGE_DMA_COMPLETE) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
					__func__);
			ret = -EIO;
			goto out;
		}
		cond_resched();
	}

	/* clear purge status */
	out_be64(mfc_cntl, 0);

	/* put the SPE in kernel mode to allow access to the loader */
	sr1 = spu_mfc_sr1_get(ctx->spu);
	sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

	/* start the loader */
	ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
	ctx->ops->signal2_write(ctx,
			(unsigned long)isolated_loader & 0xffffffff);

	ctx->ops->runcntl_write(ctx,
			SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);

	ret = 0;
	timeout = jiffies + HZ;
	while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
				status_loading) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout waiting for loader\n",
					__func__);
			ret = -EIO;
			goto out_drop_priv;
		}
		cond_resched();
	}

	if (!(status & SPU_STATUS_RUNNING)) {
		/* If isolated LOAD has failed: run SPU, we will get a
		 * stop-and-signal later. */
		pr_debug("%s: isolated LOAD failed\n", __func__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
		ret = -EACCES;
		goto out_drop_priv;
	}

	if (!(status & SPU_STATUS_ISOLATED_STATE)) {
		/* This isn't allowed by the CBEA, but check anyway */
		pr_debug("%s: SPU fell out of isolated mode?\n", __func__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
		ret = -EINVAL;
		goto out_drop_priv;
	}

out_drop_priv:
	/* Finished accessing the loader. Drop kernel mode */
	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

out:
	return ret;
}
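
/*
 * Prepare a context for execution: activate it on an SPU where needed
 * (synchronously for NOSCHED contexts), apply the isolated-mode setup
 * if requested, choose single-step or normal mode based on
 * TIF_SINGLESTEP, write the start NPC, and finally write the run
 * control register to set the SPU running.
 */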
static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
	int ret;

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	/*
	 * NOSCHED is synchronous scheduling with respect to the caller.
	 * The caller waits for the context to be loaded.
	 */
	if (ctx->flags & SPU_CREATE_NOSCHED) {
		if (ctx->state == SPU_STATE_SAVED) {
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		}
	}

	/*
	 * Apply special setup as required.
	 */
	if (ctx->flags & SPU_CREATE_ISOLATE) {
		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
			ret = spu_setup_isolated(ctx);
			if (ret)
				return ret;
		}

		/*
		 * If userspace has set the runcntl register (e.g., to
		 * issue an isolated exit), we need to re-set it here.
		 */
		runcntl = ctx->ops->runcntl_read(ctx) &
			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
		if (runcntl == 0)
			runcntl = SPU_RUNCNTL_RUNNABLE;
	} else {
		unsigned long privcntl;

		if (test_thread_flag(TIF_SINGLESTEP))
			privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP;
		else
			privcntl = SPU_PRIVCNTL_MODE_NORMAL;

		ctx->ops->privcntl_write(ctx, privcntl);
		ctx->ops->npc_write(ctx, *npc);
	}

	ctx->ops->runcntl_write(ctx, runcntl);

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		spuctx_switch_state(ctx, SPU_UTIL_USER);
	} else {
		if (ctx->state == SPU_STATE_SAVED) {
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		} else {
			spuctx_switch_state(ctx, SPU_UTIL_USER);
		}
	}

	set_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
	return 0;
}

static int spu_run_fini(struct spu_context *ctx, u32 *npc,
			u32 *status)
{
	int ret = 0;

	spu_del_from_rq(ctx);

	*status = ctx->ops->status_read(ctx);
	*npc = ctx->ops->npc_read(ctx);

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
	spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, *status);
	spu_release(ctx);

	if (signal_pending(current))
		ret = -ERESTARTSYS;

	return ret;
}

/*
 * SPU syscall restarting is tricky because we violate the basic
 * assumption that the signal handler is running on the interrupted
 * thread. Here instead, the handler runs on PowerPC user space code,
 * while the syscall was called from the SPU.
 * This means we can only do a very rough approximation of POSIX
 * signal semantics.
 */
static int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
				 unsigned int *npc)
{
	int ret;

	switch (*spu_ret) {
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
		/*
		 * Enter the regular syscall restarting for
		 * sys_spu_run, then restart the SPU syscall
		 * callback.
		 */
		*npc -= 8;
		ret = -ERESTARTSYS;
		break;
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * Restart block is too hard for now, just return -EINTR
		 * to the SPU.
		 * ERESTARTNOHAND comes from sys_pause, we also return
		 * -EINTR from there.
		 * Assume that we need to be restarted ourselves though.
		 */
		*spu_ret = -EINTR;
		ret = -ERESTARTSYS;
		break;
	default:
		printk(KERN_WARNING "%s: unexpected return code %ld\n",
		       __func__, *spu_ret);
		ret = 0;
	}
	return ret;
}

static int spu_process_callback(struct spu_context *ctx)
{
	struct spu_syscall_block s;
	u32 ls_pointer, npc;
	void __iomem *ls;
	long spu_ret;
	int ret;

	/* get syscall block from local store */
	npc = ctx->ops->npc_read(ctx) & ~3;
	ls = (void __iomem *)ctx->ops->get_ls(ctx);
	ls_pointer = in_be32(ls + npc);
	if (ls_pointer > (LS_SIZE - sizeof(s)))
		return -EFAULT;
	memcpy_fromio(&s, ls + ls_pointer, sizeof(s));

	/* do actual syscall without pinning the spu */
	ret = 0;
	spu_ret = -ENOSYS;
	npc += 4;

	if (s.nr_ret < NR_syscalls) {
		spu_release(ctx);
		/* do actual system call from here */
		spu_ret = spu_sys_callback(&s);
		if (spu_ret <= -ERESTARTSYS)
			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
		mutex_lock(&ctx->state_mutex);
		if (ret == -ERESTARTSYS)
			return ret;
	}

	/* need to re-get the ls, as it may have changed when we released the
	 * spu */
	ls = (void __iomem *)ctx->ops->get_ls(ctx);

	/* write result, jump over indirect pointer */
	memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
	ctx->ops->npc_write(ctx, npc);
	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	return ret;
}
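
/*
 * The spu_run system call proper: start the context, then sleep on
 * stop_wq until the SPU stops. A stop-and-signal with code 0x2104
 * marks an SPU-initiated system call and is dispatched to
 * spu_process_callback() above; class 0 and class 1 exceptions are
 * handled within the loop, while any other stop, halt or single-step
 * event ends the loop and is reported back to the caller through
 * *npc, *event and the return value.
 */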
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
	int ret;
	u32 status;

	if (mutex_lock_interruptible(&ctx->run_mutex))
		return -ERESTARTSYS;

	ctx->event_return = 0;

	ret = spu_acquire(ctx);
	if (ret)
		goto out_unlock;

	spu_enable_spu(ctx);

	spu_update_sched_info(ctx);

	ret = spu_run_init(ctx, npc);
	if (ret) {
		spu_release(ctx);
		goto out;
	}

	do {
		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
		if (unlikely(ret)) {
			/*
			 * This is nasty: we need the state_mutex for all the
			 * bookkeeping even if the syscall was interrupted by
			 * a signal. ewww.
			 */
			mutex_lock(&ctx->state_mutex);
			break;
		}
		if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
						&ctx->sched_flags))) {
			if (!(status & SPU_STATUS_STOPPED_BY_STOP))
				continue;
		}

		spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

		if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
		    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
			ret = spu_process_callback(ctx);
			if (ret)
				break;
			status &= ~SPU_STATUS_STOPPED_BY_STOP;
		}
		ret = spufs_handle_class1(ctx);
		if (ret)
			break;

		ret = spufs_handle_class0(ctx);
		if (ret)
			break;

		if (signal_pending(current))
			ret = -ERESTARTSYS;
	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
				     SPU_STATUS_STOPPED_BY_HALT |
				     SPU_STATUS_SINGLE_STEP)));

	spu_disable_spu(ctx);
	ret = spu_run_fini(ctx, npc, &status);
	spu_yield(ctx);

	if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	    (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
		ctx->stats.libassist++;

	if ((ret == 0) ||
	    ((ret == -ERESTARTSYS) &&
	     ((status & SPU_STATUS_STOPPED_BY_HALT) ||
	      (status & SPU_STATUS_SINGLE_STEP) ||
	      ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	       (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
		ret = status;

	/* Note: we don't need to force_sig SIGTRAP on single-step
	 * since we have TIF_SINGLESTEP set, thus the kernel will do
	 * it upon return from the syscall anyway.
	 */
	if (unlikely(status & SPU_STATUS_SINGLE_STEP))
		ret = -ERESTARTSYS;
	else if (unlikely((status & SPU_STATUS_STOPPED_BY_STOP)
	    && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff)) {
		force_sig(SIGTRAP);
		ret = -ERESTARTSYS;
	}

out:
	*event = ctx->event_return;
out_unlock:
	mutex_unlock(&ctx->run_mutex);
	return ret;
}