Path: arch/powerpc/platforms/cell/spufs/backing_ops.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* backing_ops.c - query/set operations on saved SPU context.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <[email protected]>
 *
 * These register operations allow SPUFS to operate on saved
 * SPU contexts rather than hardware.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/poll.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_info.h>
#include <asm/mmu_context.h>
#include "spufs.h"

/*
 * Reads/writes to various problem and priv2 registers require
 * state changes, i.e. generate SPU events, modify channel
 * counts, etc.
 */

static void gen_spu_event(struct spu_context *ctx, u32 event)
{
	u64 ch0_cnt;
	u64 ch0_data;
	u64 ch1_data;

	ch0_cnt = ctx->csa.spu_chnlcnt_RW[0];
	ch0_data = ctx->csa.spu_chnldata_RW[0];
	ch1_data = ctx->csa.spu_chnldata_RW[1];
	ctx->csa.spu_chnldata_RW[0] |= event;
	if ((ch0_cnt == 0) && !(ch0_data & event) && (ch1_data & event)) {
		ctx->csa.spu_chnlcnt_RW[0] = 1;
	}
}

static int spu_backing_mbox_read(struct spu_context *ctx, u32 * data)
{
	u32 mbox_stat;
	int ret = 0;

	spin_lock(&ctx->csa.register_lock);
	mbox_stat = ctx->csa.prob.mb_stat_R;
	if (mbox_stat & 0x0000ff) {
		/* Read the first available word.
		 * Implementation note: the depth
		 * of pu_mb_R is currently 1.
		 */
		*data = ctx->csa.prob.pu_mb_R;
		ctx->csa.prob.mb_stat_R &= ~(0x0000ff);
		ctx->csa.spu_chnlcnt_RW[28] = 1;
		gen_spu_event(ctx, MFC_PU_MAILBOX_AVAILABLE_EVENT);
		ret = 4;
	}
	spin_unlock(&ctx->csa.register_lock);
	return ret;
}

static u32 spu_backing_mbox_stat_read(struct spu_context *ctx)
{
	return ctx->csa.prob.mb_stat_R;
}

static __poll_t spu_backing_mbox_stat_poll(struct spu_context *ctx,
					   __poll_t events)
{
	__poll_t ret;
	u32 stat;

	ret = 0;
	spin_lock_irq(&ctx->csa.register_lock);
	stat = ctx->csa.prob.mb_stat_R;

	/* if the requested event is there, return the poll
	   mask, otherwise enable the interrupt to get notified,
	   but first mark any pending interrupts as done so
	   we don't get woken up unnecessarily */

	if (events & (EPOLLIN | EPOLLRDNORM)) {
		if (stat & 0xff0000)
			ret |= EPOLLIN | EPOLLRDNORM;
		else {
			ctx->csa.priv1.int_stat_class2_RW &=
				~CLASS2_MAILBOX_INTR;
			ctx->csa.priv1.int_mask_class2_RW |=
				CLASS2_ENABLE_MAILBOX_INTR;
		}
	}
	if (events & (EPOLLOUT | EPOLLWRNORM)) {
		if (stat & 0x00ff00)
			ret = EPOLLOUT | EPOLLWRNORM;
		else {
			ctx->csa.priv1.int_stat_class2_RW &=
				~CLASS2_MAILBOX_THRESHOLD_INTR;
			ctx->csa.priv1.int_mask_class2_RW |=
				CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
		}
	}
	spin_unlock_irq(&ctx->csa.register_lock);
	return ret;
}

static int spu_backing_ibox_read(struct spu_context *ctx, u32 * data)
{
	int ret;

	spin_lock(&ctx->csa.register_lock);
	if (ctx->csa.prob.mb_stat_R & 0xff0000) {
		/* Read the first available word.
		 * Implementation note: the depth
		 * of puint_mb_R is currently 1.
		 */
		*data = ctx->csa.priv2.puint_mb_R;
		ctx->csa.prob.mb_stat_R &= ~(0xff0000);
		ctx->csa.spu_chnlcnt_RW[30] = 1;
		gen_spu_event(ctx, MFC_PU_INT_MAILBOX_AVAILABLE_EVENT);
		ret = 4;
	} else {
		/* make sure we get woken up by the interrupt */
		ctx->csa.priv1.int_mask_class2_RW |= CLASS2_ENABLE_MAILBOX_INTR;
		ret = 0;
	}
	spin_unlock(&ctx->csa.register_lock);
	return ret;
}

static int spu_backing_wbox_write(struct spu_context *ctx, u32 data)
{
	int ret;

	spin_lock(&ctx->csa.register_lock);
	if ((ctx->csa.prob.mb_stat_R) & 0x00ff00) {
		int slot = ctx->csa.spu_chnlcnt_RW[29];
		int avail = (ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8;

		/* We have space to write wbox_data.
		 * Implementation note: the depth
		 * of spu_mb_W is currently 4.
		 */
		BUG_ON(avail != (4 - slot));
		ctx->csa.spu_mailbox_data[slot] = data;
		ctx->csa.spu_chnlcnt_RW[29] = ++slot;
		ctx->csa.prob.mb_stat_R &= ~(0x00ff00);
		ctx->csa.prob.mb_stat_R |= (((4 - slot) & 0xff) << 8);
		gen_spu_event(ctx, MFC_SPU_MAILBOX_WRITTEN_EVENT);
		ret = 4;
	} else {
		/* make sure we get woken up by the interrupt when space
		   becomes available */
		ctx->csa.priv1.int_mask_class2_RW |=
			CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
		ret = 0;
	}
	spin_unlock(&ctx->csa.register_lock);
	return ret;
}

static u32 spu_backing_signal1_read(struct spu_context *ctx)
{
	return ctx->csa.spu_chnldata_RW[3];
}

static void spu_backing_signal1_write(struct spu_context *ctx, u32 data)
{
	spin_lock(&ctx->csa.register_lock);
	if (ctx->csa.priv2.spu_cfg_RW & 0x1)
		ctx->csa.spu_chnldata_RW[3] |= data;
	else
		ctx->csa.spu_chnldata_RW[3] = data;
	ctx->csa.spu_chnlcnt_RW[3] = 1;
	gen_spu_event(ctx, MFC_SIGNAL_1_EVENT);
	spin_unlock(&ctx->csa.register_lock);
}

static u32 spu_backing_signal2_read(struct spu_context *ctx)
{
	return ctx->csa.spu_chnldata_RW[4];
}

static void spu_backing_signal2_write(struct spu_context *ctx, u32 data)
{
	spin_lock(&ctx->csa.register_lock);
	if (ctx->csa.priv2.spu_cfg_RW & 0x2)
		ctx->csa.spu_chnldata_RW[4] |= data;
	else
		ctx->csa.spu_chnldata_RW[4] = data;
	ctx->csa.spu_chnlcnt_RW[4] = 1;
	gen_spu_event(ctx, MFC_SIGNAL_2_EVENT);
	spin_unlock(&ctx->csa.register_lock);
}

static void spu_backing_signal1_type_set(struct spu_context *ctx, u64 val)
{
	u64 tmp;

	spin_lock(&ctx->csa.register_lock);
	tmp = ctx->csa.priv2.spu_cfg_RW;
	if (val)
		tmp |= 1;
	else
		tmp &= ~1;
	ctx->csa.priv2.spu_cfg_RW = tmp;
	spin_unlock(&ctx->csa.register_lock);
}

static u64 spu_backing_signal1_type_get(struct spu_context *ctx)
{
	return ((ctx->csa.priv2.spu_cfg_RW & 1) != 0);
}

static void spu_backing_signal2_type_set(struct spu_context *ctx, u64 val)
{
	u64 tmp;

	spin_lock(&ctx->csa.register_lock);
	tmp = ctx->csa.priv2.spu_cfg_RW;
	if (val)
		tmp |= 2;
	else
		tmp &= ~2;
	ctx->csa.priv2.spu_cfg_RW = tmp;
	spin_unlock(&ctx->csa.register_lock);
}

static u64 spu_backing_signal2_type_get(struct spu_context *ctx)
{
	return ((ctx->csa.priv2.spu_cfg_RW & 2) != 0);
}

static u32 spu_backing_npc_read(struct spu_context *ctx)
{
	return ctx->csa.prob.spu_npc_RW;
}

static void spu_backing_npc_write(struct spu_context *ctx, u32 val)
{
	ctx->csa.prob.spu_npc_RW = val;
}

static u32 spu_backing_status_read(struct spu_context *ctx)
{
	return ctx->csa.prob.spu_status_R;
}

static char *spu_backing_get_ls(struct spu_context *ctx)
{
	return ctx->csa.lscsa->ls;
}

static void spu_backing_privcntl_write(struct spu_context *ctx, u64 val)
{
	ctx->csa.priv2.spu_privcntl_RW = val;
}

static u32 spu_backing_runcntl_read(struct spu_context *ctx)
{
	return ctx->csa.prob.spu_runcntl_RW;
}

static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
{
	spin_lock(&ctx->csa.register_lock);
	ctx->csa.prob.spu_runcntl_RW = val;
	if (val & SPU_RUNCNTL_RUNNABLE) {
		ctx->csa.prob.spu_status_R &=
			~SPU_STATUS_STOPPED_BY_STOP &
			~SPU_STATUS_STOPPED_BY_HALT &
			~SPU_STATUS_SINGLE_STEP &
			~SPU_STATUS_INVALID_INSTR &
			~SPU_STATUS_INVALID_CH;
		ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING;
	} else {
		ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING;
	}
	spin_unlock(&ctx->csa.register_lock);
}

static void spu_backing_runcntl_stop(struct spu_context *ctx)
{
	spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP);
}

static void spu_backing_master_start(struct spu_context *ctx)
{
	struct spu_state *csa = &ctx->csa;
	u64 sr1;

	spin_lock(&csa->register_lock);
	sr1 = csa->priv1.mfc_sr1_RW | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
	csa->priv1.mfc_sr1_RW = sr1;
	spin_unlock(&csa->register_lock);
}

static void spu_backing_master_stop(struct spu_context *ctx)
{
	struct spu_state *csa = &ctx->csa;
	u64 sr1;

	spin_lock(&csa->register_lock);
	sr1 = csa->priv1.mfc_sr1_RW & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
	csa->priv1.mfc_sr1_RW = sr1;
	spin_unlock(&csa->register_lock);
}

static int spu_backing_set_mfc_query(struct spu_context * ctx, u32 mask,
				     u32 mode)
{
	struct spu_problem_collapsed *prob = &ctx->csa.prob;
	int ret;

	spin_lock(&ctx->csa.register_lock);
	ret = -EAGAIN;
	if (prob->dma_querytype_RW)
		goto out;
	ret = 0;
	/* FIXME: what are the side-effects of this? */
	prob->dma_querymask_RW = mask;
	prob->dma_querytype_RW = mode;
	/* In the current implementation, the SPU context is always
	 * acquired in runnable state when new bits are added to the
	 * mask (tagwait), so it's sufficient just to mask
	 * dma_tagstatus_R with the 'mask' parameter here.
	 */
	ctx->csa.prob.dma_tagstatus_R &= mask;
out:
	spin_unlock(&ctx->csa.register_lock);

	return ret;
}

static u32 spu_backing_read_mfc_tagstatus(struct spu_context * ctx)
{
	return ctx->csa.prob.dma_tagstatus_R;
}

static u32 spu_backing_get_mfc_free_elements(struct spu_context *ctx)
{
	return ctx->csa.prob.dma_qstatus_R;
}

static int spu_backing_send_mfc_command(struct spu_context *ctx,
					struct mfc_dma_command *cmd)
{
	int ret;

	spin_lock(&ctx->csa.register_lock);
	ret = -EAGAIN;
	/* FIXME: set up priv2->puq */
	spin_unlock(&ctx->csa.register_lock);

	return ret;
}

static void spu_backing_restart_dma(struct spu_context *ctx)
{
	ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
}

struct spu_context_ops spu_backing_ops = {
	.mbox_read = spu_backing_mbox_read,
	.mbox_stat_read = spu_backing_mbox_stat_read,
	.mbox_stat_poll = spu_backing_mbox_stat_poll,
	.ibox_read = spu_backing_ibox_read,
	.wbox_write = spu_backing_wbox_write,
	.signal1_read = spu_backing_signal1_read,
	.signal1_write = spu_backing_signal1_write,
	.signal2_read = spu_backing_signal2_read,
	.signal2_write = spu_backing_signal2_write,
	.signal1_type_set = spu_backing_signal1_type_set,
	.signal1_type_get = spu_backing_signal1_type_get,
	.signal2_type_set = spu_backing_signal2_type_set,
	.signal2_type_get = spu_backing_signal2_type_get,
	.npc_read = spu_backing_npc_read,
	.npc_write = spu_backing_npc_write,
	.status_read = spu_backing_status_read,
	.get_ls = spu_backing_get_ls,
	.privcntl_write = spu_backing_privcntl_write,
	.runcntl_read = spu_backing_runcntl_read,
	.runcntl_write = spu_backing_runcntl_write,
	.runcntl_stop = spu_backing_runcntl_stop,
	.master_start = spu_backing_master_start,
	.master_stop = spu_backing_master_stop,
	.set_mfc_query = spu_backing_set_mfc_query,
	.read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
	.get_mfc_free_elements = spu_backing_get_mfc_free_elements,
	.send_mfc_command = spu_backing_send_mfc_command,
	.restart_dma = spu_backing_restart_dma,
};
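
Editor's note: the file above exports spu_backing_ops, a table of function pointers that the rest of spufs calls through indirectly (e.g. via ctx->ops), so the same call sites can target either this saved-context implementation or a hardware one. The following is a minimal, self-contained user-space sketch of that ops-table dispatch pattern only; all names in it (toy_context, backing_status_read, etc.) are illustrative and are not part of the kernel API.

/*
 * Illustrative user-space sketch of the ops-table pattern, not kernel code.
 * A struct of function pointers decouples call sites from the backend that
 * actually services the operation (saved state here, hardware elsewhere).
 */
#include <stdio.h>
#include <stdint.h>

struct toy_context;

struct toy_context_ops {
	uint32_t (*status_read)(struct toy_context *ctx);
	void (*npc_write)(struct toy_context *ctx, uint32_t val);
};

struct toy_context {
	const struct toy_context_ops *ops;
	uint32_t saved_status;	/* stands in for a saved status register */
	uint32_t saved_npc;	/* stands in for a saved next-PC register */
};

/* "Backing" implementations operate on the saved copy, never on hardware. */
static uint32_t backing_status_read(struct toy_context *ctx)
{
	return ctx->saved_status;
}

static void backing_npc_write(struct toy_context *ctx, uint32_t val)
{
	ctx->saved_npc = val;
}

static const struct toy_context_ops backing_ops = {
	.status_read = backing_status_read,
	.npc_write = backing_npc_write,
};

int main(void)
{
	struct toy_context ctx = { .ops = &backing_ops, .saved_status = 0x1 };

	/* Call sites only see the ops table, not the backend implementation. */
	ctx.ops->npc_write(&ctx, 0x80);
	printf("status=0x%x npc=0x%x\n",
	       (unsigned)ctx.ops->status_read(&ctx), (unsigned)ctx.saved_npc);
	return 0;
}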