Path: blob/master/arch/powerpc/platforms/cell/spufs/switch.c
26498 views
// SPDX-License-Identifier: GPL-2.0-or-later1/*2* spu_switch.c3*4* (C) Copyright IBM Corp. 20055*6* Author: Mark Nutter <[email protected]>7*8* Host-side part of SPU context switch sequence outlined in9* Synergistic Processor Element, Book IV.10*11* A fully premptive switch of an SPE is very expensive in terms12* of time and system resources. SPE Book IV indicates that SPE13* allocation should follow a "serially reusable device" model,14* in which the SPE is assigned a task until it completes. When15* this is not possible, this sequence may be used to premptively16* save, and then later (optionally) restore the context of a17* program executing on an SPE.18*/1920#include <linux/export.h>21#include <linux/errno.h>22#include <linux/hardirq.h>23#include <linux/sched.h>24#include <linux/kernel.h>25#include <linux/mm.h>26#include <linux/vmalloc.h>27#include <linux/smp.h>28#include <linux/stddef.h>29#include <linux/unistd.h>3031#include <asm/io.h>32#include <asm/spu.h>33#include <asm/spu_priv1.h>34#include <asm/spu_csa.h>35#include <asm/mmu_context.h>3637#include "spufs.h"3839#include "spu_save_dump.h"40#include "spu_restore_dump.h"4142#if 043#define POLL_WHILE_TRUE(_c) { \44do { \45} while (_c); \46}47#else48#define RELAX_SPIN_COUNT 100049#define POLL_WHILE_TRUE(_c) { \50do { \51int _i; \52for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) { \53cpu_relax(); \54} \55if (unlikely(_c)) yield(); \56else break; \57} while (_c); \58}59#endif /* debug */6061#define POLL_WHILE_FALSE(_c) POLL_WHILE_TRUE(!(_c))6263static inline void acquire_spu_lock(struct spu *spu)64{65/* Save, Step 1:66* Restore, Step 1:67* Acquire SPU-specific mutual exclusion lock.68* TBD.69*/70}7172static inline void release_spu_lock(struct spu *spu)73{74/* Restore, Step 76:75* Release SPU-specific mutual exclusion lock.76* TBD.77*/78}7980static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)81{82struct spu_problem __iomem *prob = spu->problem;83u32 isolate_state;8485/* Save, Step 2:86* Save, Step 6:87* If SPU_Status[E,L,IS] any field is '1', this88* SPU is in isolate state and cannot be context89* saved at this time.90*/91isolate_state = SPU_STATUS_ISOLATED_STATE |92SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS;93return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;94}9596static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)97{98/* Save, Step 3:99* Restore, Step 2:100* Save INT_Mask_class0 in CSA.101* Write INT_MASK_class0 with value of 0.102* Save INT_Mask_class1 in CSA.103* Write INT_MASK_class1 with value of 0.104* Save INT_Mask_class2 in CSA.105* Write INT_MASK_class2 with value of 0.106* Synchronize all three interrupts to be sure107* we no longer execute a handler on another CPU.108*/109spin_lock_irq(&spu->register_lock);110if (csa) {111csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);112csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);113csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);114}115spu_int_mask_set(spu, 0, 0ul);116spu_int_mask_set(spu, 1, 0ul);117spu_int_mask_set(spu, 2, 0ul);118eieio();119spin_unlock_irq(&spu->register_lock);120121/*122* This flag needs to be set before calling synchronize_irq so123* that the update will be visible to the relevant handlers124* via a simple load.125*/126set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);127clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);128synchronize_irq(spu->irqs[0]);129synchronize_irq(spu->irqs[1]);130synchronize_irq(spu->irqs[2]);131}132133static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)134{135/* Save, Step 4:136* Restore, Step 25.137* Set a software watchdog timer, which specifies the138* maximum allowable time for a context save sequence.139*140* For present, this implementation will not set a global141* watchdog timer, as virtualization & variable system load142* may cause unpredictable execution times.143*/144}145146static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu)147{148/* Save, Step 5:149* Restore, Step 3:150* Inhibit user-space access (if provided) to this151* SPU by unmapping the virtual pages assigned to152* the SPU memory-mapped I/O (MMIO) for problem153* state. TBD.154*/155}156157static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)158{159/* Save, Step 7:160* Restore, Step 5:161* Set a software context switch pending flag.162* Done above in Step 3 - disable_interrupts().163*/164}165166static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)167{168struct spu_priv2 __iomem *priv2 = spu->priv2;169170/* Save, Step 8:171* Suspend DMA and save MFC_CNTL.172*/173switch (in_be64(&priv2->mfc_control_RW) &174MFC_CNTL_SUSPEND_DMA_STATUS_MASK) {175case MFC_CNTL_SUSPEND_IN_PROGRESS:176POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &177MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==178MFC_CNTL_SUSPEND_COMPLETE);179fallthrough;180case MFC_CNTL_SUSPEND_COMPLETE:181if (csa)182csa->priv2.mfc_control_RW =183in_be64(&priv2->mfc_control_RW) |184MFC_CNTL_SUSPEND_DMA_QUEUE;185break;186case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:187out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);188POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &189MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==190MFC_CNTL_SUSPEND_COMPLETE);191if (csa)192csa->priv2.mfc_control_RW =193in_be64(&priv2->mfc_control_RW) &194~MFC_CNTL_SUSPEND_DMA_QUEUE &195~MFC_CNTL_SUSPEND_MASK;196break;197}198}199200static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)201{202struct spu_problem __iomem *prob = spu->problem;203204/* Save, Step 9:205* Save SPU_Runcntl in the CSA. This value contains206* the "Application Desired State".207*/208csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW);209}210211static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)212{213/* Save, Step 10:214* Save MFC_SR1 in the CSA.215*/216csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu);217}218219static inline void save_spu_status(struct spu_state *csa, struct spu *spu)220{221struct spu_problem __iomem *prob = spu->problem;222223/* Save, Step 11:224* Read SPU_Status[R], and save to CSA.225*/226if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) {227csa->prob.spu_status_R = in_be32(&prob->spu_status_R);228} else {229u32 stopped;230231out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);232eieio();233POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &234SPU_STATUS_RUNNING);235stopped =236SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |237SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;238if ((in_be32(&prob->spu_status_R) & stopped) == 0)239csa->prob.spu_status_R = SPU_STATUS_RUNNING;240else241csa->prob.spu_status_R = in_be32(&prob->spu_status_R);242}243}244245static inline void save_mfc_stopped_status(struct spu_state *csa,246struct spu *spu)247{248struct spu_priv2 __iomem *priv2 = spu->priv2;249const u64 mask = MFC_CNTL_DECREMENTER_RUNNING |250MFC_CNTL_DMA_QUEUES_EMPTY;251252/* Save, Step 12:253* Read MFC_CNTL[Ds]. Update saved copy of254* CSA.MFC_CNTL[Ds].255*256* update: do the same with MFC_CNTL[Q].257*/258csa->priv2.mfc_control_RW &= ~mask;259csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask;260}261262static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)263{264struct spu_priv2 __iomem *priv2 = spu->priv2;265266/* Save, Step 13:267* Write MFC_CNTL[Dh] set to a '1' to halt268* the decrementer.269*/270out_be64(&priv2->mfc_control_RW,271MFC_CNTL_DECREMENTER_HALTED | MFC_CNTL_SUSPEND_MASK);272eieio();273}274275static inline void save_timebase(struct spu_state *csa, struct spu *spu)276{277/* Save, Step 14:278* Read PPE Timebase High and Timebase low registers279* and save in CSA. TBD.280*/281csa->suspend_time = get_cycles();282}283284static inline void remove_other_spu_access(struct spu_state *csa,285struct spu *spu)286{287/* Save, Step 15:288* Remove other SPU access to this SPU by unmapping289* this SPU's pages from their address space. TBD.290*/291}292293static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)294{295struct spu_problem __iomem *prob = spu->problem;296297/* Save, Step 16:298* Restore, Step 11.299* Write SPU_MSSync register. Poll SPU_MSSync[P]300* for a value of 0.301*/302out_be64(&prob->spc_mssync_RW, 1UL);303POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING);304}305306static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)307{308/* Save, Step 17:309* Restore, Step 12.310* Restore, Step 48.311* Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register.312* Then issue a PPE sync instruction.313*/314spu_tlb_invalidate(spu);315mb();316}317318static inline void handle_pending_interrupts(struct spu_state *csa,319struct spu *spu)320{321/* Save, Step 18:322* Handle any pending interrupts from this SPU323* here. This is OS or hypervisor specific. One324* option is to re-enable interrupts to handle any325* pending interrupts, with the interrupt handlers326* recognizing the software Context Switch Pending327* flag, to ensure the SPU execution or MFC command328* queue is not restarted. TBD.329*/330}331332static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu)333{334struct spu_priv2 __iomem *priv2 = spu->priv2;335int i;336337/* Save, Step 19:338* If MFC_Cntl[Se]=0 then save339* MFC command queues.340*/341if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) {342for (i = 0; i < 8; i++) {343csa->priv2.puq[i].mfc_cq_data0_RW =344in_be64(&priv2->puq[i].mfc_cq_data0_RW);345csa->priv2.puq[i].mfc_cq_data1_RW =346in_be64(&priv2->puq[i].mfc_cq_data1_RW);347csa->priv2.puq[i].mfc_cq_data2_RW =348in_be64(&priv2->puq[i].mfc_cq_data2_RW);349csa->priv2.puq[i].mfc_cq_data3_RW =350in_be64(&priv2->puq[i].mfc_cq_data3_RW);351}352for (i = 0; i < 16; i++) {353csa->priv2.spuq[i].mfc_cq_data0_RW =354in_be64(&priv2->spuq[i].mfc_cq_data0_RW);355csa->priv2.spuq[i].mfc_cq_data1_RW =356in_be64(&priv2->spuq[i].mfc_cq_data1_RW);357csa->priv2.spuq[i].mfc_cq_data2_RW =358in_be64(&priv2->spuq[i].mfc_cq_data2_RW);359csa->priv2.spuq[i].mfc_cq_data3_RW =360in_be64(&priv2->spuq[i].mfc_cq_data3_RW);361}362}363}364365static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu)366{367struct spu_problem __iomem *prob = spu->problem;368369/* Save, Step 20:370* Save the PPU_QueryMask register371* in the CSA.372*/373csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW);374}375376static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)377{378struct spu_problem __iomem *prob = spu->problem;379380/* Save, Step 21:381* Save the PPU_QueryType register382* in the CSA.383*/384csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);385}386387static inline void save_ppu_tagstatus(struct spu_state *csa, struct spu *spu)388{389struct spu_problem __iomem *prob = spu->problem;390391/* Save the Prxy_TagStatus register in the CSA.392*393* It is unnecessary to restore dma_tagstatus_R, however,394* dma_tagstatus_R in the CSA is accessed via backing_ops, so395* we must save it.396*/397csa->prob.dma_tagstatus_R = in_be32(&prob->dma_tagstatus_R);398}399400static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)401{402struct spu_priv2 __iomem *priv2 = spu->priv2;403404/* Save, Step 22:405* Save the MFC_CSR_TSQ register406* in the LSCSA.407*/408csa->priv2.spu_tag_status_query_RW =409in_be64(&priv2->spu_tag_status_query_RW);410}411412static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)413{414struct spu_priv2 __iomem *priv2 = spu->priv2;415416/* Save, Step 23:417* Save the MFC_CSR_CMD1 and MFC_CSR_CMD2418* registers in the CSA.419*/420csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW);421csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW);422}423424static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)425{426struct spu_priv2 __iomem *priv2 = spu->priv2;427428/* Save, Step 24:429* Save the MFC_CSR_ATO register in430* the CSA.431*/432csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW);433}434435static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)436{437/* Save, Step 25:438* Save the MFC_TCLASS_ID register in439* the CSA.440*/441csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu);442}443444static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)445{446/* Save, Step 26:447* Restore, Step 23.448* Write the MFC_TCLASS_ID register with449* the value 0x10000000.450*/451spu_mfc_tclass_id_set(spu, 0x10000000);452eieio();453}454455static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)456{457struct spu_priv2 __iomem *priv2 = spu->priv2;458459/* Save, Step 27:460* Restore, Step 14.461* Write MFC_CNTL[Pc]=1 (purge queue).462*/463out_be64(&priv2->mfc_control_RW,464MFC_CNTL_PURGE_DMA_REQUEST |465MFC_CNTL_SUSPEND_MASK);466eieio();467}468469static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)470{471struct spu_priv2 __iomem *priv2 = spu->priv2;472473/* Save, Step 28:474* Poll MFC_CNTL[Ps] until value '11' is read475* (purge complete).476*/477POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &478MFC_CNTL_PURGE_DMA_STATUS_MASK) ==479MFC_CNTL_PURGE_DMA_COMPLETE);480}481482static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)483{484/* Save, Step 30:485* Restore, Step 18:486* Write MFC_SR1 with MFC_SR1[D=0,S=1] and487* MFC_SR1[TL,R,Pr,T] set correctly for the488* OS specific environment.489*490* Implementation note: The SPU-side code491* for save/restore is privileged, so the492* MFC_SR1[Pr] bit is not set.493*494*/495spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |496MFC_STATE1_RELOCATE_MASK |497MFC_STATE1_BUS_TLBIE_MASK));498}499500static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)501{502struct spu_problem __iomem *prob = spu->problem;503504/* Save, Step 31:505* Save SPU_NPC in the CSA.506*/507csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW);508}509510static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu)511{512struct spu_priv2 __iomem *priv2 = spu->priv2;513514/* Save, Step 32:515* Save SPU_PrivCntl in the CSA.516*/517csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW);518}519520static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu)521{522struct spu_priv2 __iomem *priv2 = spu->priv2;523524/* Save, Step 33:525* Restore, Step 16:526* Write SPU_PrivCntl[S,Le,A] fields reset to 0.527*/528out_be64(&priv2->spu_privcntl_RW, 0UL);529eieio();530}531532static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu)533{534struct spu_priv2 __iomem *priv2 = spu->priv2;535536/* Save, Step 34:537* Save SPU_LSLR in the CSA.538*/539csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW);540}541542static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu)543{544struct spu_priv2 __iomem *priv2 = spu->priv2;545546/* Save, Step 35:547* Restore, Step 17.548* Reset SPU_LSLR.549*/550out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK);551eieio();552}553554static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu)555{556struct spu_priv2 __iomem *priv2 = spu->priv2;557558/* Save, Step 36:559* Save SPU_Cfg in the CSA.560*/561csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW);562}563564static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)565{566/* Save, Step 37:567* Save PM_Trace_Tag_Wait_Mask in the CSA.568* Not performed by this implementation.569*/570}571572static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)573{574/* Save, Step 38:575* Save RA_GROUP_ID register and the576* RA_ENABLE reigster in the CSA.577*/578csa->priv1.resource_allocation_groupID_RW =579spu_resource_allocation_groupID_get(spu);580csa->priv1.resource_allocation_enable_RW =581spu_resource_allocation_enable_get(spu);582}583584static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)585{586struct spu_problem __iomem *prob = spu->problem;587588/* Save, Step 39:589* Save MB_Stat register in the CSA.590*/591csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R);592}593594static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu)595{596struct spu_problem __iomem *prob = spu->problem;597598/* Save, Step 40:599* Save the PPU_MB register in the CSA.600*/601csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R);602}603604static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)605{606struct spu_priv2 __iomem *priv2 = spu->priv2;607608/* Save, Step 41:609* Save the PPUINT_MB register in the CSA.610*/611csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R);612}613614static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)615{616struct spu_priv2 __iomem *priv2 = spu->priv2;617u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };618int i;619620/* Save, Step 42:621*/622623/* Save CH 1, without channel count */624out_be64(&priv2->spu_chnlcntptr_RW, 1);625csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW);626627/* Save the following CH: [0,3,4,24,25,27] */628for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {629idx = ch_indices[i];630out_be64(&priv2->spu_chnlcntptr_RW, idx);631eieio();632csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW);633csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW);634out_be64(&priv2->spu_chnldata_RW, 0UL);635out_be64(&priv2->spu_chnlcnt_RW, 0UL);636eieio();637}638}639640static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)641{642struct spu_priv2 __iomem *priv2 = spu->priv2;643int i;644645/* Save, Step 43:646* Save SPU Read Mailbox Channel.647*/648out_be64(&priv2->spu_chnlcntptr_RW, 29UL);649eieio();650csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);651for (i = 0; i < 4; i++) {652csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);653}654out_be64(&priv2->spu_chnlcnt_RW, 0UL);655eieio();656}657658static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)659{660struct spu_priv2 __iomem *priv2 = spu->priv2;661662/* Save, Step 44:663* Save MFC_CMD Channel.664*/665out_be64(&priv2->spu_chnlcntptr_RW, 21UL);666eieio();667csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);668eieio();669}670671static inline void reset_ch(struct spu_state *csa, struct spu *spu)672{673struct spu_priv2 __iomem *priv2 = spu->priv2;674u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };675u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };676u64 idx;677int i;678679/* Save, Step 45:680* Reset the following CH: [21, 23, 28, 30]681*/682for (i = 0; i < 4; i++) {683idx = ch_indices[i];684out_be64(&priv2->spu_chnlcntptr_RW, idx);685eieio();686out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);687eieio();688}689}690691static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)692{693struct spu_priv2 __iomem *priv2 = spu->priv2;694695/* Save, Step 46:696* Restore, Step 25.697* Write MFC_CNTL[Sc]=0 (resume queue processing).698*/699out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);700}701702static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu,703unsigned int *code, int code_size)704{705/* Save, Step 47:706* Restore, Step 30.707* If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All708* register, then initialize SLB_VSID and SLB_ESID709* to provide access to SPU context save code and710* LSCSA.711*712* This implementation places both the context713* switch code and LSCSA in kernel address space.714*715* Further this implementation assumes that the716* MFC_SR1[R]=1 (in other words, assume that717* translation is desired by OS environment).718*/719spu_invalidate_slbs(spu);720spu_setup_kernel_slbs(spu, csa->lscsa, code, code_size);721}722723static inline void set_switch_active(struct spu_state *csa, struct spu *spu)724{725/* Save, Step 48:726* Restore, Step 23.727* Change the software context switch pending flag728* to context switch active. This implementation does729* not uses a switch active flag.730*731* Now that we have saved the mfc in the csa, we can add in the732* restart command if an exception occurred.733*/734if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags))735csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;736clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);737mb();738}739740static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)741{742unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |743CLASS1_ENABLE_STORAGE_FAULT_INTR;744745/* Save, Step 49:746* Restore, Step 22:747* Reset and then enable interrupts, as748* needed by OS.749*750* This implementation enables only class1751* (translation) interrupts.752*/753spin_lock_irq(&spu->register_lock);754spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);755spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);756spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);757spu_int_mask_set(spu, 0, 0ul);758spu_int_mask_set(spu, 1, class1_mask);759spu_int_mask_set(spu, 2, 0ul);760spin_unlock_irq(&spu->register_lock);761}762763static inline int send_mfc_dma(struct spu *spu, unsigned long ea,764unsigned int ls_offset, unsigned int size,765unsigned int tag, unsigned int rclass,766unsigned int cmd)767{768struct spu_problem __iomem *prob = spu->problem;769union mfc_tag_size_class_cmd command;770unsigned int transfer_size;771volatile unsigned int status = 0x0;772773while (size > 0) {774transfer_size =775(size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size;776command.u.mfc_size = transfer_size;777command.u.mfc_tag = tag;778command.u.mfc_rclassid = rclass;779command.u.mfc_cmd = cmd;780do {781out_be32(&prob->mfc_lsa_W, ls_offset);782out_be64(&prob->mfc_ea_W, ea);783out_be64(&prob->mfc_union_W.all64, command.all64);784status =785in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);786if (unlikely(status & 0x2)) {787cpu_relax();788}789} while (status & 0x3);790size -= transfer_size;791ea += transfer_size;792ls_offset += transfer_size;793}794return 0;795}796797static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)798{799unsigned long addr = (unsigned long)&csa->lscsa->ls[0];800unsigned int ls_offset = 0x0;801unsigned int size = 16384;802unsigned int tag = 0;803unsigned int rclass = 0;804unsigned int cmd = MFC_PUT_CMD;805806/* Save, Step 50:807* Issue a DMA command to copy the first 16K bytes808* of local storage to the CSA.809*/810send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);811}812813static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)814{815struct spu_problem __iomem *prob = spu->problem;816817/* Save, Step 51:818* Restore, Step 31.819* Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry820* point address of context save code in local821* storage.822*823* This implementation uses SPU-side save/restore824* programs with entry points at LSA of 0.825*/826out_be32(&prob->spu_npc_RW, 0);827eieio();828}829830static inline void set_signot1(struct spu_state *csa, struct spu *spu)831{832struct spu_problem __iomem *prob = spu->problem;833union {834u64 ull;835u32 ui[2];836} addr64;837838/* Save, Step 52:839* Restore, Step 32:840* Write SPU_Sig_Notify_1 register with upper 32-bits841* of the CSA.LSCSA effective address.842*/843addr64.ull = (u64) csa->lscsa;844out_be32(&prob->signal_notify1, addr64.ui[0]);845eieio();846}847848static inline void set_signot2(struct spu_state *csa, struct spu *spu)849{850struct spu_problem __iomem *prob = spu->problem;851union {852u64 ull;853u32 ui[2];854} addr64;855856/* Save, Step 53:857* Restore, Step 33:858* Write SPU_Sig_Notify_2 register with lower 32-bits859* of the CSA.LSCSA effective address.860*/861addr64.ull = (u64) csa->lscsa;862out_be32(&prob->signal_notify2, addr64.ui[1]);863eieio();864}865866static inline void send_save_code(struct spu_state *csa, struct spu *spu)867{868unsigned long addr = (unsigned long)&spu_save_code[0];869unsigned int ls_offset = 0x0;870unsigned int size = sizeof(spu_save_code);871unsigned int tag = 0;872unsigned int rclass = 0;873unsigned int cmd = MFC_GETFS_CMD;874875/* Save, Step 54:876* Issue a DMA command to copy context save code877* to local storage and start SPU.878*/879send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);880}881882static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)883{884struct spu_problem __iomem *prob = spu->problem;885886/* Save, Step 55:887* Restore, Step 38.888* Write PPU_QueryMask=1 (enable Tag Group 0)889* and issue eieio instruction.890*/891out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0));892eieio();893}894895static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)896{897struct spu_problem __iomem *prob = spu->problem;898u32 mask = MFC_TAGID_TO_TAGMASK(0);899unsigned long flags;900901/* Save, Step 56:902* Restore, Step 39.903* Restore, Step 39.904* Restore, Step 46.905* Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete)906* or write PPU_QueryType[TS]=01 and wait for Tag Group907* Complete Interrupt. Write INT_Stat_Class0 or908* INT_Stat_Class2 with value of 'handled'.909*/910POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);911912local_irq_save(flags);913spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);914spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);915local_irq_restore(flags);916}917918static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)919{920struct spu_problem __iomem *prob = spu->problem;921unsigned long flags;922923/* Save, Step 57:924* Restore, Step 40.925* Poll until SPU_Status[R]=0 or wait for SPU Class 0926* or SPU Class 2 interrupt. Write INT_Stat_class0927* or INT_Stat_class2 with value of handled.928*/929POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);930931local_irq_save(flags);932spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);933spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);934local_irq_restore(flags);935}936937static inline int check_save_status(struct spu_state *csa, struct spu *spu)938{939struct spu_problem __iomem *prob = spu->problem;940u32 complete;941942/* Save, Step 54:943* If SPU_Status[P]=1 and SPU_Status[SC] = "success",944* context save succeeded, otherwise context save945* failed.946*/947complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) |948SPU_STATUS_STOPPED_BY_STOP);949return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;950}951952static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)953{954/* Restore, Step 4:955* If required, notify the "using application" that956* the SPU task has been terminated. TBD.957*/958}959960static inline void suspend_mfc_and_halt_decr(struct spu_state *csa,961struct spu *spu)962{963struct spu_priv2 __iomem *priv2 = spu->priv2;964965/* Restore, Step 7:966* Write MFC_Cntl[Dh,Sc,Sm]='1','1','0' to suspend967* the queue and halt the decrementer.968*/969out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |970MFC_CNTL_DECREMENTER_HALTED);971eieio();972}973974static inline void wait_suspend_mfc_complete(struct spu_state *csa,975struct spu *spu)976{977struct spu_priv2 __iomem *priv2 = spu->priv2;978979/* Restore, Step 8:980* Restore, Step 47.981* Poll MFC_CNTL[Ss] until 11 is returned.982*/983POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &984MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==985MFC_CNTL_SUSPEND_COMPLETE);986}987988static inline int suspend_spe(struct spu_state *csa, struct spu *spu)989{990struct spu_problem __iomem *prob = spu->problem;991992/* Restore, Step 9:993* If SPU_Status[R]=1, stop SPU execution994* and wait for stop to complete.995*996* Returns 1 if SPU_Status[R]=1 on entry.997* 0 otherwise998*/999if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {1000if (in_be32(&prob->spu_status_R) &1001SPU_STATUS_ISOLATED_EXIT_STATUS) {1002POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &1003SPU_STATUS_RUNNING);1004}1005if ((in_be32(&prob->spu_status_R) &1006SPU_STATUS_ISOLATED_LOAD_STATUS)1007|| (in_be32(&prob->spu_status_R) &1008SPU_STATUS_ISOLATED_STATE)) {1009out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);1010eieio();1011POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &1012SPU_STATUS_RUNNING);1013out_be32(&prob->spu_runcntl_RW, 0x2);1014eieio();1015POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &1016SPU_STATUS_RUNNING);1017}1018if (in_be32(&prob->spu_status_R) &1019SPU_STATUS_WAITING_FOR_CHANNEL) {1020out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);1021eieio();1022POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &1023SPU_STATUS_RUNNING);1024}1025return 1;1026}1027return 0;1028}10291030static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)1031{1032struct spu_problem __iomem *prob = spu->problem;10331034/* Restore, Step 10:1035* If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1,1036* release SPU from isolate state.1037*/1038if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {1039if (in_be32(&prob->spu_status_R) &1040SPU_STATUS_ISOLATED_EXIT_STATUS) {1041spu_mfc_sr1_set(spu,1042MFC_STATE1_MASTER_RUN_CONTROL_MASK);1043eieio();1044out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);1045eieio();1046POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &1047SPU_STATUS_RUNNING);1048}1049if ((in_be32(&prob->spu_status_R) &1050SPU_STATUS_ISOLATED_LOAD_STATUS)1051|| (in_be32(&prob->spu_status_R) &1052SPU_STATUS_ISOLATED_STATE)) {1053spu_mfc_sr1_set(spu,1054MFC_STATE1_MASTER_RUN_CONTROL_MASK);1055eieio();1056out_be32(&prob->spu_runcntl_RW, 0x2);1057eieio();1058POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &1059SPU_STATUS_RUNNING);1060}1061}1062}10631064static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)1065{1066struct spu_priv2 __iomem *priv2 = spu->priv2;1067u64 ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };1068u64 idx;1069int i;10701071/* Restore, Step 20:1072*/10731074/* Reset CH 1 */1075out_be64(&priv2->spu_chnlcntptr_RW, 1);1076out_be64(&priv2->spu_chnldata_RW, 0UL);10771078/* Reset the following CH: [0,3,4,24,25,27] */1079for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {1080idx = ch_indices[i];1081out_be64(&priv2->spu_chnlcntptr_RW, idx);1082eieio();1083out_be64(&priv2->spu_chnldata_RW, 0UL);1084out_be64(&priv2->spu_chnlcnt_RW, 0UL);1085eieio();1086}1087}10881089static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu)1090{1091struct spu_priv2 __iomem *priv2 = spu->priv2;1092u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL };1093u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL };1094u64 idx;1095int i;10961097/* Restore, Step 21:1098* Reset the following CH: [21, 23, 28, 29, 30]1099*/1100for (i = 0; i < 5; i++) {1101idx = ch_indices[i];1102out_be64(&priv2->spu_chnlcntptr_RW, idx);1103eieio();1104out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);1105eieio();1106}1107}11081109static inline void setup_spu_status_part1(struct spu_state *csa,1110struct spu *spu)1111{1112u32 status_P = SPU_STATUS_STOPPED_BY_STOP;1113u32 status_I = SPU_STATUS_INVALID_INSTR;1114u32 status_H = SPU_STATUS_STOPPED_BY_HALT;1115u32 status_S = SPU_STATUS_SINGLE_STEP;1116u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR;1117u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP;1118u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT |SPU_STATUS_STOPPED_BY_STOP;1119u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP |SPU_STATUS_INVALID_INSTR;1120u32 status_code;11211122/* Restore, Step 27:1123* If the CSA.SPU_Status[I,S,H,P]=1 then add the correct1124* instruction sequence to the end of the SPU based restore1125* code (after the "context restored" stop and signal) to1126* restore the correct SPU status.1127*1128* NOTE: Rather than modifying the SPU executable, we1129* instead add a new 'stopped_status' field to the1130* LSCSA. The SPU-side restore reads this field and1131* takes the appropriate action when exiting.1132*/11331134status_code =1135(csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF;1136if ((csa->prob.spu_status_R & status_P_I) == status_P_I) {11371138/* SPU_Status[P,I]=1 - Illegal Instruction followed1139* by Stop and Signal instruction, followed by 'br -4'.1140*1141*/1142csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I;1143csa->lscsa->stopped_status.slot[1] = status_code;11441145} else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) {11461147/* SPU_Status[P,H]=1 - Halt Conditional, followed1148* by Stop and Signal instruction, followed by1149* 'br -4'.1150*/1151csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H;1152csa->lscsa->stopped_status.slot[1] = status_code;11531154} else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) {11551156/* SPU_Status[S,P]=1 - Stop and Signal instruction1157* followed by 'br -4'.1158*/1159csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P;1160csa->lscsa->stopped_status.slot[1] = status_code;11611162} else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) {11631164/* SPU_Status[S,I]=1 - Illegal instruction followed1165* by 'br -4'.1166*/1167csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I;1168csa->lscsa->stopped_status.slot[1] = status_code;11691170} else if ((csa->prob.spu_status_R & status_P) == status_P) {11711172/* SPU_Status[P]=1 - Stop and Signal instruction1173* followed by 'br -4'.1174*/1175csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;1176csa->lscsa->stopped_status.slot[1] = status_code;11771178} else if ((csa->prob.spu_status_R & status_H) == status_H) {11791180/* SPU_Status[H]=1 - Halt Conditional, followed1181* by 'br -4'.1182*/1183csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H;11841185} else if ((csa->prob.spu_status_R & status_S) == status_S) {11861187/* SPU_Status[S]=1 - Two nop instructions.1188*/1189csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S;11901191} else if ((csa->prob.spu_status_R & status_I) == status_I) {11921193/* SPU_Status[I]=1 - Illegal instruction followed1194* by 'br -4'.1195*/1196csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I;11971198}1199}12001201static inline void setup_spu_status_part2(struct spu_state *csa,1202struct spu *spu)1203{1204u32 mask;12051206/* Restore, Step 28:1207* If the CSA.SPU_Status[I,S,H,P,R]=0 then1208* add a 'br *' instruction to the end of1209* the SPU based restore code.1210*1211* NOTE: Rather than modifying the SPU executable, we1212* instead add a new 'stopped_status' field to the1213* LSCSA. The SPU-side restore reads this field and1214* takes the appropriate action when exiting.1215*/1216mask = SPU_STATUS_INVALID_INSTR |1217SPU_STATUS_SINGLE_STEP |1218SPU_STATUS_STOPPED_BY_HALT |1219SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;1220if (!(csa->prob.spu_status_R & mask)) {1221csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R;1222}1223}12241225static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)1226{1227/* Restore, Step 29:1228* Restore RA_GROUP_ID register and the1229* RA_ENABLE reigster from the CSA.1230*/1231spu_resource_allocation_groupID_set(spu,1232csa->priv1.resource_allocation_groupID_RW);1233spu_resource_allocation_enable_set(spu,1234csa->priv1.resource_allocation_enable_RW);1235}12361237static inline void send_restore_code(struct spu_state *csa, struct spu *spu)1238{1239unsigned long addr = (unsigned long)&spu_restore_code[0];1240unsigned int ls_offset = 0x0;1241unsigned int size = sizeof(spu_restore_code);1242unsigned int tag = 0;1243unsigned int rclass = 0;1244unsigned int cmd = MFC_GETFS_CMD;12451246/* Restore, Step 37:1247* Issue MFC DMA command to copy context1248* restore code to local storage.1249*/1250send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);1251}12521253static inline void setup_decr(struct spu_state *csa, struct spu *spu)1254{1255/* Restore, Step 34:1256* If CSA.MFC_CNTL[Ds]=1 (decrementer was1257* running) then adjust decrementer, set1258* decrementer running status in LSCSA,1259* and set decrementer "wrapped" status1260* in LSCSA.1261*/1262if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) {1263cycles_t resume_time = get_cycles();1264cycles_t delta_time = resume_time - csa->suspend_time;12651266csa->lscsa->decr_status.slot[0] = SPU_DECR_STATUS_RUNNING;1267if (csa->lscsa->decr.slot[0] < delta_time) {1268csa->lscsa->decr_status.slot[0] |=1269SPU_DECR_STATUS_WRAPPED;1270}12711272csa->lscsa->decr.slot[0] -= delta_time;1273} else {1274csa->lscsa->decr_status.slot[0] = 0;1275}1276}12771278static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu)1279{1280/* Restore, Step 35:1281* Copy the CSA.PU_MB data into the LSCSA.1282*/1283csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R;1284}12851286static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu)1287{1288/* Restore, Step 36:1289* Copy the CSA.PUINT_MB data into the LSCSA.1290*/1291csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R;1292}12931294static inline int check_restore_status(struct spu_state *csa, struct spu *spu)1295{1296struct spu_problem __iomem *prob = spu->problem;1297u32 complete;12981299/* Restore, Step 40:1300* If SPU_Status[P]=1 and SPU_Status[SC] = "success",1301* context restore succeeded, otherwise context restore1302* failed.1303*/1304complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) |1305SPU_STATUS_STOPPED_BY_STOP);1306return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;1307}13081309static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu)1310{1311struct spu_priv2 __iomem *priv2 = spu->priv2;13121313/* Restore, Step 41:1314* Restore SPU_PrivCntl from the CSA.1315*/1316out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW);1317eieio();1318}13191320static inline void restore_status_part1(struct spu_state *csa, struct spu *spu)1321{1322struct spu_problem __iomem *prob = spu->problem;1323u32 mask;13241325/* Restore, Step 42:1326* If any CSA.SPU_Status[I,S,H,P]=1, then1327* restore the error or single step state.1328*/1329mask = SPU_STATUS_INVALID_INSTR |1330SPU_STATUS_SINGLE_STEP |1331SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;1332if (csa->prob.spu_status_R & mask) {1333out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);1334eieio();1335POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &1336SPU_STATUS_RUNNING);1337}1338}13391340static inline void restore_status_part2(struct spu_state *csa, struct spu *spu)1341{1342struct spu_problem __iomem *prob = spu->problem;1343u32 mask;13441345/* Restore, Step 43:1346* If all CSA.SPU_Status[I,S,H,P,R]=0 then write1347* SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1,1348* then write '00' to SPU_RunCntl[R0R1] and wait1349* for SPU_Status[R]=0.1350*/1351mask = SPU_STATUS_INVALID_INSTR |1352SPU_STATUS_SINGLE_STEP |1353SPU_STATUS_STOPPED_BY_HALT |1354SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;1355if (!(csa->prob.spu_status_R & mask)) {1356out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);1357eieio();1358POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) &1359SPU_STATUS_RUNNING);1360out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);1361eieio();1362POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &1363SPU_STATUS_RUNNING);1364}1365}13661367static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)1368{1369unsigned long addr = (unsigned long)&csa->lscsa->ls[0];1370unsigned int ls_offset = 0x0;1371unsigned int size = 16384;1372unsigned int tag = 0;1373unsigned int rclass = 0;1374unsigned int cmd = MFC_GET_CMD;13751376/* Restore, Step 44:1377* Issue a DMA command to restore the first1378* 16kb of local storage from CSA.1379*/1380send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);1381}13821383static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)1384{1385struct spu_priv2 __iomem *priv2 = spu->priv2;13861387/* Restore, Step 47.1388* Write MFC_Cntl[Sc,Sm]='1','0' to suspend1389* the queue.1390*/1391out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);1392eieio();1393}13941395static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)1396{1397/* Restore, Step 49:1398* Write INT_MASK_class0 with value of 0.1399* Write INT_MASK_class1 with value of 0.1400* Write INT_MASK_class2 with value of 0.1401* Write INT_STAT_class0 with value of -1.1402* Write INT_STAT_class1 with value of -1.1403* Write INT_STAT_class2 with value of -1.1404*/1405spin_lock_irq(&spu->register_lock);1406spu_int_mask_set(spu, 0, 0ul);1407spu_int_mask_set(spu, 1, 0ul);1408spu_int_mask_set(spu, 2, 0ul);1409spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);1410spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);1411spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);1412spin_unlock_irq(&spu->register_lock);1413}14141415static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu)1416{1417struct spu_priv2 __iomem *priv2 = spu->priv2;1418int i;14191420/* Restore, Step 50:1421* If MFC_Cntl[Se]!=0 then restore1422* MFC command queues.1423*/1424if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) {1425for (i = 0; i < 8; i++) {1426out_be64(&priv2->puq[i].mfc_cq_data0_RW,1427csa->priv2.puq[i].mfc_cq_data0_RW);1428out_be64(&priv2->puq[i].mfc_cq_data1_RW,1429csa->priv2.puq[i].mfc_cq_data1_RW);1430out_be64(&priv2->puq[i].mfc_cq_data2_RW,1431csa->priv2.puq[i].mfc_cq_data2_RW);1432out_be64(&priv2->puq[i].mfc_cq_data3_RW,1433csa->priv2.puq[i].mfc_cq_data3_RW);1434}1435for (i = 0; i < 16; i++) {1436out_be64(&priv2->spuq[i].mfc_cq_data0_RW,1437csa->priv2.spuq[i].mfc_cq_data0_RW);1438out_be64(&priv2->spuq[i].mfc_cq_data1_RW,1439csa->priv2.spuq[i].mfc_cq_data1_RW);1440out_be64(&priv2->spuq[i].mfc_cq_data2_RW,1441csa->priv2.spuq[i].mfc_cq_data2_RW);1442out_be64(&priv2->spuq[i].mfc_cq_data3_RW,1443csa->priv2.spuq[i].mfc_cq_data3_RW);1444}1445}1446eieio();1447}14481449static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu)1450{1451struct spu_problem __iomem *prob = spu->problem;14521453/* Restore, Step 51:1454* Restore the PPU_QueryMask register from CSA.1455*/1456out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW);1457eieio();1458}14591460static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu)1461{1462struct spu_problem __iomem *prob = spu->problem;14631464/* Restore, Step 52:1465* Restore the PPU_QueryType register from CSA.1466*/1467out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW);1468eieio();1469}14701471static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)1472{1473struct spu_priv2 __iomem *priv2 = spu->priv2;14741475/* Restore, Step 53:1476* Restore the MFC_CSR_TSQ register from CSA.1477*/1478out_be64(&priv2->spu_tag_status_query_RW,1479csa->priv2.spu_tag_status_query_RW);1480eieio();1481}14821483static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)1484{1485struct spu_priv2 __iomem *priv2 = spu->priv2;14861487/* Restore, Step 54:1488* Restore the MFC_CSR_CMD1 and MFC_CSR_CMD21489* registers from CSA.1490*/1491out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW);1492out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW);1493eieio();1494}14951496static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)1497{1498struct spu_priv2 __iomem *priv2 = spu->priv2;14991500/* Restore, Step 55:1501* Restore the MFC_CSR_ATO register from CSA.1502*/1503out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW);1504}15051506static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)1507{1508/* Restore, Step 56:1509* Restore the MFC_TCLASS_ID register from CSA.1510*/1511spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);1512eieio();1513}15141515static inline void set_llr_event(struct spu_state *csa, struct spu *spu)1516{1517u64 ch0_cnt, ch0_data;1518u64 ch1_data;15191520/* Restore, Step 57:1521* Set the Lock Line Reservation Lost Event by:1522* 1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1.1523* 2. If CSA.SPU_Channel_0_Count=0 and1524* CSA.SPU_Wr_Event_Mask[Lr]=1 and1525* CSA.SPU_Event_Status[Lr]=0 then set1526* CSA.SPU_Event_Status_Count=1.1527*/1528ch0_cnt = csa->spu_chnlcnt_RW[0];1529ch0_data = csa->spu_chnldata_RW[0];1530ch1_data = csa->spu_chnldata_RW[1];1531csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT;1532if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) &&1533(ch1_data & MFC_LLR_LOST_EVENT)) {1534csa->spu_chnlcnt_RW[0] = 1;1535}1536}15371538static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)1539{1540/* Restore, Step 58:1541* If the status of the CSA software decrementer1542* "wrapped" flag is set, OR in a '1' to1543* CSA.SPU_Event_Status[Tm].1544*/1545if (!(csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED))1546return;15471548if ((csa->spu_chnlcnt_RW[0] == 0) &&1549(csa->spu_chnldata_RW[1] & 0x20) &&1550!(csa->spu_chnldata_RW[0] & 0x20))1551csa->spu_chnlcnt_RW[0] = 1;15521553csa->spu_chnldata_RW[0] |= 0x20;1554}15551556static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)1557{1558struct spu_priv2 __iomem *priv2 = spu->priv2;1559u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };1560int i;15611562/* Restore, Step 59:1563* Restore the following CH: [0,3,4,24,25,27]1564*/1565for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {1566idx = ch_indices[i];1567out_be64(&priv2->spu_chnlcntptr_RW, idx);1568eieio();1569out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]);1570out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]);1571eieio();1572}1573}15741575static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu)1576{1577struct spu_priv2 __iomem *priv2 = spu->priv2;1578u64 ch_indices[3] = { 9UL, 21UL, 23UL };1579u64 ch_counts[3] = { 1UL, 16UL, 1UL };1580u64 idx;1581int i;15821583/* Restore, Step 60:1584* Restore the following CH: [9,21,23].1585*/1586ch_counts[0] = 1UL;1587ch_counts[1] = csa->spu_chnlcnt_RW[21];1588ch_counts[2] = 1UL;1589for (i = 0; i < 3; i++) {1590idx = ch_indices[i];1591out_be64(&priv2->spu_chnlcntptr_RW, idx);1592eieio();1593out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);1594eieio();1595}1596}15971598static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu)1599{1600struct spu_priv2 __iomem *priv2 = spu->priv2;16011602/* Restore, Step 61:1603* Restore the SPU_LSLR register from CSA.1604*/1605out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW);1606eieio();1607}16081609static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu)1610{1611struct spu_priv2 __iomem *priv2 = spu->priv2;16121613/* Restore, Step 62:1614* Restore the SPU_Cfg register from CSA.1615*/1616out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW);1617eieio();1618}16191620static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu)1621{1622/* Restore, Step 63:1623* Restore PM_Trace_Tag_Wait_Mask from CSA.1624* Not performed by this implementation.1625*/1626}16271628static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu)1629{1630struct spu_problem __iomem *prob = spu->problem;16311632/* Restore, Step 64:1633* Restore SPU_NPC from CSA.1634*/1635out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW);1636eieio();1637}16381639static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)1640{1641struct spu_priv2 __iomem *priv2 = spu->priv2;1642int i;16431644/* Restore, Step 65:1645* Restore MFC_RdSPU_MB from CSA.1646*/1647out_be64(&priv2->spu_chnlcntptr_RW, 29UL);1648eieio();1649out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]);1650for (i = 0; i < 4; i++) {1651out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]);1652}1653eieio();1654}16551656static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)1657{1658struct spu_problem __iomem *prob = spu->problem;16591660/* Restore, Step 66:1661* If CSA.MB_Stat[P]=0 (mailbox empty) then1662* read from the PPU_MB register.1663*/1664if ((csa->prob.mb_stat_R & 0xFF) == 0) {1665in_be32(&prob->pu_mb_R);1666eieio();1667}1668}16691670static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)1671{1672struct spu_priv2 __iomem *priv2 = spu->priv2;16731674/* Restore, Step 66:1675* If CSA.MB_Stat[I]=0 (mailbox empty) then1676* read from the PPUINT_MB register.1677*/1678if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {1679in_be64(&priv2->puint_mb_R);1680eieio();1681spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);1682eieio();1683}1684}16851686static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)1687{1688/* Restore, Step 69:1689* Restore the MFC_SR1 register from CSA.1690*/1691spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);1692eieio();1693}16941695static inline void set_int_route(struct spu_state *csa, struct spu *spu)1696{1697struct spu_context *ctx = spu->ctx;16981699spu_cpu_affinity_set(spu, ctx->last_ran);1700}17011702static inline void restore_other_spu_access(struct spu_state *csa,1703struct spu *spu)1704{1705/* Restore, Step 70:1706* Restore other SPU mappings to this SPU. TBD.1707*/1708}17091710static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu)1711{1712struct spu_problem __iomem *prob = spu->problem;17131714/* Restore, Step 71:1715* If CSA.SPU_Status[R]=1 then write1716* SPU_RunCntl[R0R1]='01'.1717*/1718if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) {1719out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);1720eieio();1721}1722}17231724static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)1725{1726struct spu_priv2 __iomem *priv2 = spu->priv2;17271728/* Restore, Step 72:1729* Restore the MFC_CNTL register for the CSA.1730*/1731out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);1732eieio();17331734/*1735* The queue is put back into the same state that was evident prior to1736* the context switch. The suspend flag is added to the saved state in1737* the csa, if the operational state was suspending or suspended. In1738* this case, the code that suspended the mfc is responsible for1739* continuing it. Note that SPE faults do not change the operational1740* state of the spu.1741*/1742}17431744static inline void enable_user_access(struct spu_state *csa, struct spu *spu)1745{1746/* Restore, Step 73:1747* Enable user-space access (if provided) to this1748* SPU by mapping the virtual pages assigned to1749* the SPU memory-mapped I/O (MMIO) for problem1750* state. TBD.1751*/1752}17531754static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)1755{1756/* Restore, Step 74:1757* Reset the "context switch active" flag.1758* Not performed by this implementation.1759*/1760}17611762static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)1763{1764/* Restore, Step 75:1765* Re-enable SPU interrupts.1766*/1767spin_lock_irq(&spu->register_lock);1768spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);1769spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);1770spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);1771spin_unlock_irq(&spu->register_lock);1772}17731774static int quiece_spu(struct spu_state *prev, struct spu *spu)1775{1776/*1777* Combined steps 2-18 of SPU context save sequence, which1778* quiesce the SPU state (disable SPU execution, MFC command1779* queues, decrementer, SPU interrupts, etc.).1780*1781* Returns 0 on success.1782* 2 if failed step 2.1783* 6 if failed step 6.1784*/17851786if (check_spu_isolate(prev, spu)) { /* Step 2. */1787return 2;1788}1789disable_interrupts(prev, spu); /* Step 3. */1790set_watchdog_timer(prev, spu); /* Step 4. */1791inhibit_user_access(prev, spu); /* Step 5. */1792if (check_spu_isolate(prev, spu)) { /* Step 6. */1793return 6;1794}1795set_switch_pending(prev, spu); /* Step 7. */1796save_mfc_cntl(prev, spu); /* Step 8. */1797save_spu_runcntl(prev, spu); /* Step 9. */1798save_mfc_sr1(prev, spu); /* Step 10. */1799save_spu_status(prev, spu); /* Step 11. */1800save_mfc_stopped_status(prev, spu); /* Step 12. */1801halt_mfc_decr(prev, spu); /* Step 13. */1802save_timebase(prev, spu); /* Step 14. */1803remove_other_spu_access(prev, spu); /* Step 15. */1804do_mfc_mssync(prev, spu); /* Step 16. */1805issue_mfc_tlbie(prev, spu); /* Step 17. */1806handle_pending_interrupts(prev, spu); /* Step 18. */18071808return 0;1809}18101811static void save_csa(struct spu_state *prev, struct spu *spu)1812{1813/*1814* Combine steps 19-44 of SPU context save sequence, which1815* save regions of the privileged & problem state areas.1816*/18171818save_mfc_queues(prev, spu); /* Step 19. */1819save_ppu_querymask(prev, spu); /* Step 20. */1820save_ppu_querytype(prev, spu); /* Step 21. */1821save_ppu_tagstatus(prev, spu); /* NEW. */1822save_mfc_csr_tsq(prev, spu); /* Step 22. */1823save_mfc_csr_cmd(prev, spu); /* Step 23. */1824save_mfc_csr_ato(prev, spu); /* Step 24. */1825save_mfc_tclass_id(prev, spu); /* Step 25. */1826set_mfc_tclass_id(prev, spu); /* Step 26. */1827save_mfc_cmd(prev, spu); /* Step 26a - moved from 44. */1828purge_mfc_queue(prev, spu); /* Step 27. */1829wait_purge_complete(prev, spu); /* Step 28. */1830setup_mfc_sr1(prev, spu); /* Step 30. */1831save_spu_npc(prev, spu); /* Step 31. */1832save_spu_privcntl(prev, spu); /* Step 32. */1833reset_spu_privcntl(prev, spu); /* Step 33. */1834save_spu_lslr(prev, spu); /* Step 34. */1835reset_spu_lslr(prev, spu); /* Step 35. */1836save_spu_cfg(prev, spu); /* Step 36. */1837save_pm_trace(prev, spu); /* Step 37. */1838save_mfc_rag(prev, spu); /* Step 38. */1839save_ppu_mb_stat(prev, spu); /* Step 39. */1840save_ppu_mb(prev, spu); /* Step 40. */1841save_ppuint_mb(prev, spu); /* Step 41. */1842save_ch_part1(prev, spu); /* Step 42. */1843save_spu_mb(prev, spu); /* Step 43. */1844reset_ch(prev, spu); /* Step 45. */1845}18461847static void save_lscsa(struct spu_state *prev, struct spu *spu)1848{1849/*1850* Perform steps 46-57 of SPU context save sequence,1851* which save regions of the local store and register1852* file.1853*/18541855resume_mfc_queue(prev, spu); /* Step 46. */1856/* Step 47. */1857setup_mfc_slbs(prev, spu, spu_save_code, sizeof(spu_save_code));1858set_switch_active(prev, spu); /* Step 48. */1859enable_interrupts(prev, spu); /* Step 49. */1860save_ls_16kb(prev, spu); /* Step 50. */1861set_spu_npc(prev, spu); /* Step 51. */1862set_signot1(prev, spu); /* Step 52. */1863set_signot2(prev, spu); /* Step 53. */1864send_save_code(prev, spu); /* Step 54. */1865set_ppu_querymask(prev, spu); /* Step 55. */1866wait_tag_complete(prev, spu); /* Step 56. */1867wait_spu_stopped(prev, spu); /* Step 57. */1868}18691870static void force_spu_isolate_exit(struct spu *spu)1871{1872struct spu_problem __iomem *prob = spu->problem;1873struct spu_priv2 __iomem *priv2 = spu->priv2;18741875/* Stop SPE execution and wait for completion. */1876out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);1877iobarrier_rw();1878POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);18791880/* Restart SPE master runcntl. */1881spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK);1882iobarrier_w();18831884/* Initiate isolate exit request and wait for completion. */1885out_be64(&priv2->spu_privcntl_RW, 4LL);1886iobarrier_w();1887out_be32(&prob->spu_runcntl_RW, 2);1888iobarrier_rw();1889POLL_WHILE_FALSE((in_be32(&prob->spu_status_R)1890& SPU_STATUS_STOPPED_BY_STOP));18911892/* Reset load request to normal. */1893out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL);1894iobarrier_w();1895}18961897/**1898* stop_spu_isolate1899* Check SPU run-control state and force isolated1900* exit function as necessary.1901*/1902static void stop_spu_isolate(struct spu *spu)1903{1904struct spu_problem __iomem *prob = spu->problem;19051906if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) {1907/* The SPU is in isolated state; the only way1908* to get it out is to perform an isolated1909* exit (clean) operation.1910*/1911force_spu_isolate_exit(spu);1912}1913}19141915static void harvest(struct spu_state *prev, struct spu *spu)1916{1917/*1918* Perform steps 2-25 of SPU context restore sequence,1919* which resets an SPU either after a failed save, or1920* when using SPU for first time.1921*/19221923disable_interrupts(prev, spu); /* Step 2. */1924inhibit_user_access(prev, spu); /* Step 3. */1925terminate_spu_app(prev, spu); /* Step 4. */1926set_switch_pending(prev, spu); /* Step 5. */1927stop_spu_isolate(spu); /* NEW. */1928remove_other_spu_access(prev, spu); /* Step 6. */1929suspend_mfc_and_halt_decr(prev, spu); /* Step 7. */1930wait_suspend_mfc_complete(prev, spu); /* Step 8. */1931if (!suspend_spe(prev, spu)) /* Step 9. */1932clear_spu_status(prev, spu); /* Step 10. */1933do_mfc_mssync(prev, spu); /* Step 11. */1934issue_mfc_tlbie(prev, spu); /* Step 12. */1935handle_pending_interrupts(prev, spu); /* Step 13. */1936purge_mfc_queue(prev, spu); /* Step 14. */1937wait_purge_complete(prev, spu); /* Step 15. */1938reset_spu_privcntl(prev, spu); /* Step 16. */1939reset_spu_lslr(prev, spu); /* Step 17. */1940setup_mfc_sr1(prev, spu); /* Step 18. */1941spu_invalidate_slbs(spu); /* Step 19. */1942reset_ch_part1(prev, spu); /* Step 20. */1943reset_ch_part2(prev, spu); /* Step 21. */1944enable_interrupts(prev, spu); /* Step 22. */1945set_switch_active(prev, spu); /* Step 23. */1946set_mfc_tclass_id(prev, spu); /* Step 24. */1947resume_mfc_queue(prev, spu); /* Step 25. */1948}19491950static void restore_lscsa(struct spu_state *next, struct spu *spu)1951{1952/*1953* Perform steps 26-40 of SPU context restore sequence,1954* which restores regions of the local store and register1955* file.1956*/19571958set_watchdog_timer(next, spu); /* Step 26. */1959setup_spu_status_part1(next, spu); /* Step 27. */1960setup_spu_status_part2(next, spu); /* Step 28. */1961restore_mfc_rag(next, spu); /* Step 29. */1962/* Step 30. */1963setup_mfc_slbs(next, spu, spu_restore_code, sizeof(spu_restore_code));1964set_spu_npc(next, spu); /* Step 31. */1965set_signot1(next, spu); /* Step 32. */1966set_signot2(next, spu); /* Step 33. */1967setup_decr(next, spu); /* Step 34. */1968setup_ppu_mb(next, spu); /* Step 35. */1969setup_ppuint_mb(next, spu); /* Step 36. */1970send_restore_code(next, spu); /* Step 37. */1971set_ppu_querymask(next, spu); /* Step 38. */1972wait_tag_complete(next, spu); /* Step 39. */1973wait_spu_stopped(next, spu); /* Step 40. */1974}19751976static void restore_csa(struct spu_state *next, struct spu *spu)1977{1978/*1979* Combine steps 41-76 of SPU context restore sequence, which1980* restore regions of the privileged & problem state areas.1981*/19821983restore_spu_privcntl(next, spu); /* Step 41. */1984restore_status_part1(next, spu); /* Step 42. */1985restore_status_part2(next, spu); /* Step 43. */1986restore_ls_16kb(next, spu); /* Step 44. */1987wait_tag_complete(next, spu); /* Step 45. */1988suspend_mfc(next, spu); /* Step 46. */1989wait_suspend_mfc_complete(next, spu); /* Step 47. */1990issue_mfc_tlbie(next, spu); /* Step 48. */1991clear_interrupts(next, spu); /* Step 49. */1992restore_mfc_queues(next, spu); /* Step 50. */1993restore_ppu_querymask(next, spu); /* Step 51. */1994restore_ppu_querytype(next, spu); /* Step 52. */1995restore_mfc_csr_tsq(next, spu); /* Step 53. */1996restore_mfc_csr_cmd(next, spu); /* Step 54. */1997restore_mfc_csr_ato(next, spu); /* Step 55. */1998restore_mfc_tclass_id(next, spu); /* Step 56. */1999set_llr_event(next, spu); /* Step 57. */2000restore_decr_wrapped(next, spu); /* Step 58. */2001restore_ch_part1(next, spu); /* Step 59. */2002restore_ch_part2(next, spu); /* Step 60. */2003restore_spu_lslr(next, spu); /* Step 61. */2004restore_spu_cfg(next, spu); /* Step 62. */2005restore_pm_trace(next, spu); /* Step 63. */2006restore_spu_npc(next, spu); /* Step 64. */2007restore_spu_mb(next, spu); /* Step 65. */2008check_ppu_mb_stat(next, spu); /* Step 66. */2009check_ppuint_mb_stat(next, spu); /* Step 67. */2010spu_invalidate_slbs(spu); /* Modified Step 68. */2011restore_mfc_sr1(next, spu); /* Step 69. */2012set_int_route(next, spu); /* NEW */2013restore_other_spu_access(next, spu); /* Step 70. */2014restore_spu_runcntl(next, spu); /* Step 71. */2015restore_mfc_cntl(next, spu); /* Step 72. */2016enable_user_access(next, spu); /* Step 73. */2017reset_switch_active(next, spu); /* Step 74. */2018reenable_interrupts(next, spu); /* Step 75. */2019}20202021static int __do_spu_save(struct spu_state *prev, struct spu *spu)2022{2023int rc;20242025/*2026* SPU context save can be broken into three phases:2027*2028* (a) quiesce [steps 2-16].2029* (b) save of CSA, performed by PPE [steps 17-42]2030* (c) save of LSCSA, mostly performed by SPU [steps 43-52].2031*2032* Returns 0 on success.2033* 2,6 if failed to quiece SPU2034* 53 if SPU-side of save failed.2035*/20362037rc = quiece_spu(prev, spu); /* Steps 2-16. */2038switch (rc) {2039default:2040case 2:2041case 6:2042harvest(prev, spu);2043return rc;2044break;2045case 0:2046break;2047}2048save_csa(prev, spu); /* Steps 17-43. */2049save_lscsa(prev, spu); /* Steps 44-53. */2050return check_save_status(prev, spu); /* Step 54. */2051}20522053static int __do_spu_restore(struct spu_state *next, struct spu *spu)2054{2055int rc;20562057/*2058* SPU context restore can be broken into three phases:2059*2060* (a) harvest (or reset) SPU [steps 2-24].2061* (b) restore LSCSA [steps 25-40], mostly performed by SPU.2062* (c) restore CSA [steps 41-76], performed by PPE.2063*2064* The 'harvest' step is not performed here, but rather2065* as needed below.2066*/20672068restore_lscsa(next, spu); /* Steps 24-39. */2069rc = check_restore_status(next, spu); /* Step 40. */2070switch (rc) {2071default:2072/* Failed. Return now. */2073return rc;2074break;2075case 0:2076/* Fall through to next step. */2077break;2078}2079restore_csa(next, spu);20802081return 0;2082}20832084/**2085* spu_save - SPU context save, with locking.2086* @prev: pointer to SPU context save area, to be saved.2087* @spu: pointer to SPU iomem structure.2088*2089* Acquire locks, perform the save operation then return.2090*/2091int spu_save(struct spu_state *prev, struct spu *spu)2092{2093int rc;20942095acquire_spu_lock(spu); /* Step 1. */2096rc = __do_spu_save(prev, spu); /* Steps 2-53. */2097release_spu_lock(spu);2098if (rc != 0 && rc != 2 && rc != 6) {2099panic("%s failed on SPU[%d], rc=%d.\n",2100__func__, spu->number, rc);2101}2102return 0;2103}2104EXPORT_SYMBOL_GPL(spu_save);21052106/**2107* spu_restore - SPU context restore, with harvest and locking.2108* @new: pointer to SPU context save area, to be restored.2109* @spu: pointer to SPU iomem structure.2110*2111* Perform harvest + restore, as we may not be coming2112* from a previous successful save operation, and the2113* hardware state is unknown.2114*/2115int spu_restore(struct spu_state *new, struct spu *spu)2116{2117int rc;21182119acquire_spu_lock(spu);2120harvest(NULL, spu);2121spu->slb_replace = 0;2122rc = __do_spu_restore(new, spu);2123release_spu_lock(spu);2124if (rc) {2125panic("%s failed on SPU[%d] rc=%d.\n",2126__func__, spu->number, rc);2127}2128return rc;2129}2130EXPORT_SYMBOL_GPL(spu_restore);21312132static void init_prob(struct spu_state *csa)2133{2134csa->spu_chnlcnt_RW[9] = 1;2135csa->spu_chnlcnt_RW[21] = 16;2136csa->spu_chnlcnt_RW[23] = 1;2137csa->spu_chnlcnt_RW[28] = 1;2138csa->spu_chnlcnt_RW[30] = 1;2139csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP;2140csa->prob.mb_stat_R = 0x000400;2141}21422143static void init_priv1(struct spu_state *csa)2144{2145/* Enable decode, relocate, tlbie response, master runcntl. */2146csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK |2147MFC_STATE1_MASTER_RUN_CONTROL_MASK |2148MFC_STATE1_PROBLEM_STATE_MASK |2149MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;21502151/* Enable OS-specific set of interrupts. */2152csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |2153CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |2154CLASS0_ENABLE_SPU_ERROR_INTR;2155csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |2156CLASS1_ENABLE_STORAGE_FAULT_INTR;2157csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR |2158CLASS2_ENABLE_SPU_HALT_INTR |2159CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR;2160}21612162static void init_priv2(struct spu_state *csa)2163{2164csa->priv2.spu_lslr_RW = LS_ADDR_MASK;2165csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE |2166MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION |2167MFC_CNTL_DMA_QUEUES_EMPTY_MASK;2168}21692170/**2171* spu_alloc_csa - allocate and initialize an SPU context save area.2172*2173* Allocate and initialize the contents of an SPU context save area.2174* This includes enabling address translation, interrupt masks, etc.,2175* as appropriate for the given OS environment.2176*2177* Note that storage for the 'lscsa' is allocated separately,2178* as it is by far the largest of the context save regions,2179* and may need to be pinned or otherwise specially aligned.2180*/2181int spu_init_csa(struct spu_state *csa)2182{2183int rc;21842185if (!csa)2186return -EINVAL;2187memset(csa, 0, sizeof(struct spu_state));21882189rc = spu_alloc_lscsa(csa);2190if (rc)2191return rc;21922193spin_lock_init(&csa->register_lock);21942195init_prob(csa);2196init_priv1(csa);2197init_priv2(csa);21982199return 0;2200}22012202void spu_fini_csa(struct spu_state *csa)2203{2204spu_free_lscsa(csa);2205}220622072208