/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 * Copyright (C) 1999,2000 Philipp Rumpf
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 *  - handle in assembly and use shadowed registers only
 *  - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>

#include <linux/linkage.h>

#ifdef CONFIG_64BIT
	.level 2.0w
#else
	.level 2.0
#endif

	.import		pa_dbit_lock,data

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro  space_to_prot spc prot
	depd,z	\spc,62,31,\prot
	.endm
#else
	.macro  space_to_prot spc prot
	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif

	/* Switch to virtual mapping, trashing only %r1.
	 * Clears the space registers, loads KERNEL_PSW and the
	 * continuation address (local label 4) into the interruption
	 * queues, then returns-from-interruption to re-enter at 4:
	 * with translation enabled. */
	.macro  virt_map
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation */
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mfsp	%sr7, %r1
	or,=    %r0,%r1,%r0	/* Only save sr7 in sr3 if sr7 != 0 */
	mtsp	%r1, %sr3
	tovirt_r1 %r29
	load32	KERNEL_PSW, %r1

	rsm     PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
	mtsp	%r0, %sr6
	mtsp	%r0, %sr7
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
	mtctl	%r1, %ipsw
	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18	/* Set IIAOQ head */
	rfir
	nop
4:
	.endm

	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 *	If sr7 == 0
	 *	    Already using a kernel stack, so call the
	 *	    get_stack_use_r30 macro to push a pt_regs structure
	 *	    on the stack, and store registers there.
	 *	else
	 *	    Need to set up a kernel stack, so call the
	 *	    get_stack_use_cr30 macro to set up a pointer
	 *	    to the pt_regs structure contained within the
	 *	    task pointer pointed to by cr30. Set the stack
	 *	    pointer to point to the end of the task structure.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29. %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr. We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map. We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption. %r29 is used to hold a pointer
	 * the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 *
	 * N.B.
	 * TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
	 */

	.macro  get_stack_use_cr30

	/* we save the registers in the task struct */

	mfctl   %cr30, %r1
	tophys  %r1,%r9
	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
	tophys  %r1,%r9
	ldo     TASK_REGS(%r9),%r9
	STREG   %r30, PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	copy    %r9,%r29		/* %r29 = &pt_regs (survives rfir) */
	mfctl   %cr30, %r1
	ldo	THREAD_SZ_ALGN(%r1), %r30	/* kernel stack = end of task area */
	.endm

	.macro  get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys  %r30,%r9
	STREG   %r30,PT_GR30(%r9)
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	copy    %r9,%r29		/* %r29 = &pt_regs (survives rfir) */
	.endm

	/* Undo the stack setup above: restore %r1, %r30 and (last) %r29
	 * from the pt_regs area pointed to by %r29. */
	.macro  rest_stack
	LDREG   PT_GR1(%r29), %r1
	LDREG   PT_GR30(%r29),%r30
	LDREG   PT_GR29(%r29),%r29
	.endm

	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi     \code, %r8	/* delay slot: fault type for intr_save */
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp    %sr7,%r16	/* delay slot: sr7 tells us user vs. kernel */
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	PA(os_hpmc)	/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align		32
	.endm
#endif

	/*
	 * itlb miss
	 * interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b	itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va	/* delay slot: faulting instruction address */

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	naitlb_miss_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	naitlb_miss_20w
#else
	b	naitlb_miss_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align		32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b	dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b	nadtlb_miss_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	nadtlb_miss_20w
#else
	b	nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

	/* In LP64, the space contains part of the upper 32 bits of the
	 * fault.
	 * We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register */
	.macro	space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u	\spc,63,SPACEID_SHIFT,\tmp
	depd	%r0,63,SPACEID_SHIFT,\spc
	depd	\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm

	.import swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro	get_pgd		spc,reg
	ldil	L%PA(swapper_pg_dir),\reg
	ldo	R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=) %r0,\spc,%r0		/* nullify next insn for space 0 */
	mfctl	%cr25,\reg
	.endm

	/*
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel

	*/
	.macro	space_check	spc,tmp,fault
	mfsp	%sr7,\tmp
	or,COND(<>) %r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy	\spc,\tmp
	or,COND(=) %r0,\tmp,%r0		/* nullify if executing as kernel */
	cmpb,COND(<>),n \tmp,\spc,\fault
	.endm

	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro	L2_ptep	pmd,pte,index,va,fault
#if PT_NLEVELS == 3
	extru	\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
	extru	\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
	dep     %r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	copy	%r0,\pte
	ldw,s	\index(\pmd),\pmd
	bb,>=,n	\pmd,_PxD_PRESENT_BIT,\fault
	dep	%r0,31,PxD_FLAG_SHIFT,\pmd	/* clear flags */
	copy	\pmd,%r9
	SHLREG	%r9,PxD_VALUE_SHIFT,\pmd
	extru	\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	dep	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	shladd	\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
	LDREG	%r0(\pmd),\pte		/* pmd is now pte */
	bb,>=,n	\pte,_PAGE_PRESENT_BIT,\fault
	.endm

	/* Look up PTE in a 3-Level scheme.
	 *
	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
	 * first pmd adjacent to the pgd.  This means that we can
	 * subtract a constant offset to get to it.  The pmd and pgd
	 * sizes are arranged so that a single pmd covers 4GB (giving
	 * a full LP64 process access to 8TB) so our lookups are
	 * effectively L2 for the first 4GB of the kernel (i.e. for
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory) */
	.macro	L3_ptep pgd,pte,index,va,fault
#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
	extrd,u	\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	copy	%r0,\pte
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldw,s	\index(\pgd),\pgd
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	bb,>=,n	\pgd,_PxD_PRESENT_BIT,\fault
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	shld	\pgd,PxD_VALUE_SHIFT,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	copy	\index,\pgd
	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldo	ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
	L2_ptep		\pgd,\pte,\index,\va,\fault
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro	update_ptep	ptep,pte,tmp,tmp1
	ldi	_PAGE_ACCESSED,\tmp1
	or	\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0	/* nullify store if bit already set */
	STREG	\tmp,0(\ptep)
	.endm

	/* Set the dirty bit (and accessed bit).
	 * No need to be
	 * clever, this is only used from the dirty fault */
	.macro	update_dirty	ptep,pte,tmp
	ldi	_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or	\tmp,\pte,\pte
	STREG	\pte,0(\ptep)
	.endm

	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
	#define PAGE_ADD_SHIFT	(PAGE_SHIFT-12)

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	.macro	convert_for_tlb_insert20 pte
	extrd,u	\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
	depdi	_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
	.endm

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro	make_insert_tlb	spc,pte,prot
	space_to_prot	\spc \prot	/* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page */
	depd	\pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ */
	extrd,u,*=	\pte,_PAGE_USER_BIT+32,1,%r0
	depdi	7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denys read and write */
	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd	%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be use for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handles cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depdi	1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	convert_for_tlb_insert20 \pte
	.endm

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro	make_insert_tlb_11	spc,pte,prot
	zdep	\spc,30,15,\prot
	dep	\pte,8,7,\prot
	extru,=	\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi	1,12,1,\prot
	extru,=	\pte,_PAGE_USER_BIT,1,%r0
	depi	7,11,3,\prot	/* Set for user space (1 rsvd for read) */
	extru,=	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi	0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi	0,31,ASM_PFN_PTE_SHIFT,\pte
	SHRREG	\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro	f_extend	pte,tmp
	extrd,s	\pte,42,4,\tmp
	addi,<>	1,\tmp,%r0
	extrd,s	\pte,63,25,\pte
	.endm

	/* The alias region is an 8MB aligned 16MB to do clear and
	 * copy user pages at addresses congruent with the user
	 * virtual address.
	 *
	 * To use the alias page, you set %r26 up with the to TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the from tlb entry (or nothing if only a to entry---for
	 * clear_user_page_asm) */
	.macro	do_alias	spc,tmp,tmp1,va,pte,prot,fault
	cmpib,COND(<>),n 0,\spc,\fault
	ldil	L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi	0,31,32,\tmp
#endif
	copy	\va,\tmp1
	depi	0,31,23,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	mfctl	%cr19,\tmp	/* iir */
	/* get the opcode (first six bits) into \tmp */
	extrw,u \tmp,5,6,\tmp
	/*
	 * Only setting the T bit prevents data cache movein
	 * Setting access rights to zero prevents instruction cache movein
	 *
	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
	 * to type field and _PAGE_READ goes to top bit of PL1
	 */
	ldi	(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
	/*
	 * so if the opcode is one (i.e. this is a memory management
	 * instruction) nullify the next load so \prot is only T.
	 * Otherwise this is a normal data operation
	 */
	cmpiclr,=	0x01,\tmp,%r0
	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
	depd,z	\prot,8,7,\prot
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte
	or		%r26,%r0,\pte
	.endm


	/*
	 * Align fault_vector_20 on 4K boundary so that both
	 * fault_vector_11 and fault_vector_20 are on the
	 * same page.
	 * This is only necessary as long as we
	 * write protect the kernel text, which we may stop
	 * doing once we use large page translations to cover
	 * the static part of the kernel address space.
	 */

	.text

	.align	PAGE_SIZE

ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 6
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
	naitlb_20	16
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)

#ifndef CONFIG_64BIT

	.align 2048

ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 6
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
	naitlb_11	16
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif
	/* Fault vector is separately protected and *must* be on its own page */
	.align		PAGE_SIZE
ENTRY(end_fault_vector)

	.import	handle_interruption,code
	.import	do_cpu_irq_mask,code

	/*
	 * r26 = function to be called
	 * r25 = argument to pass in
	 * r24 = flags for do_fork()
	 *
	 * Kernel threads don't ever return, so they don't need
	 * a true register context. We just save away the arguments
	 * for copy_thread/ret_ to properly set up the child.
	 */

#define CLONE_VM 0x100	/* Must agree with <linux/sched.h> */
#define CLONE_UNTRACED 0x00800000

	.import do_fork
ENTRY(__kernel_thread)
	STREG	%r2, -RP_OFFSET(%r30)

	copy	%r30, %r1
	ldo	PT_SZ_ALGN(%r30),%r30
#ifdef CONFIG_64BIT
	/* Yo, function pointers in wide mode are little structs... -PB */
	ldd	24(%r26), %r2
	STREG	%r2, PT_GR27(%r1)	/* Store childs %dp */
	ldd	16(%r26), %r26

	STREG	%r22, PT_GR22(%r1)	/* save r22 (arg5) */
	copy	%r0, %r22		/* user_tid */
#endif
	STREG	%r26, PT_GR26(%r1)  /* Store function & argument for child */
	STREG	%r25, PT_GR25(%r1)
	ldil	L%CLONE_UNTRACED, %r26
	ldo	CLONE_VM(%r26), %r26   /* Force CLONE_VM since only init_mm */
	or	%r26, %r24, %r26      /* will have kernel mappings.	 */
	ldi	1, %r25			/* stack_start, signals kernel thread */
	stw	%r0, -52(%r30)	     	/* user_tid */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	do_fork, %r2
	copy	%r1, %r24		/* pt_regs */

	/* Parent Returns here */

	LDREG	-PT_SZ_ALGN-RP_OFFSET(%r30), %r2
	ldo	-PT_SZ_ALGN(%r30), %r30
	bv	%r0(%r2)
	nop
ENDPROC(__kernel_thread)

	/*
	 * Child Returns here
	 *
	 * copy_thread moved args from temp save area set up above
	 * into task save area.
	 */

ENTRY(ret_from_kernel_thread)

	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
	LDREG	TASK_PT_GR22(%r1), %r22
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
	loadgp				/* Thread could have been in a module */
#endif
#ifndef CONFIG_64BIT
	b	sys_exit
#else
	load32	sys_exit, %r1
	bv	%r0(%r1)
#endif
	ldi	0, %r26
ENDPROC(ret_from_kernel_thread)

	.import	sys_execve, code
ENTRY(__execve)
	copy	%r2, %r15
	copy	%r30, %r16
	ldo	PT_SZ_ALGN(%r30), %r30
	STREG	%r26, PT_GR26(%r16)
	STREG	%r25, PT_GR25(%r16)
	STREG	%r24, PT_GR24(%r16)
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	sys_execve, %r2
	copy	%r16, %r26

	cmpib,=,n 0,%r28,intr_return    /* forward */

	/* yes, this will trap and die. */
	copy	%r15, %r2
	copy	%r16, %r30
	bv	%r0(%r2)
	nop
ENDPROC(__execve)


	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
ENTRY(_switch_to)
	STREG	 %r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	LDREG	TASK_THREAD_INFO(%r25), %r25
	bv	%r0(%r2)
	mtctl   %r25,%cr30

_switch_to_ret:
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
ENDPROC(_switch_to)

	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * proceses exit via intr_restore.
	 *
	 * XXX If any syscalls that change a processes space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */

	.align	PAGE_SIZE

ENTRY(syscall_exit_rfi)
	mfctl   %cr30,%r16
	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also Filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG   PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd    %r20,31,32,%r1
#endif
	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
	STREG   %r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page). Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG   %r0,PT_SR2(%r16)
	mfsp    %sr3,%r19
	STREG   %r19,PT_SR0(%r16)
	STREG   %r19,PT_SR1(%r16)
	STREG   %r19,PT_SR3(%r16)
	STREG   %r19,PT_SR4(%r16)
	STREG   %r19,PT_SR5(%r16)
	STREG   %r19,PT_SR6(%r16)
	STREG   %r19,PT_SR7(%r16)

intr_return:
	/* NOTE: Need to enable interrupts incase we schedule. */
	ssm     PSW_SM_I, %r0

intr_check_resched:

	/* check for reschedule */
	mfctl   %cr30,%r1
	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl   %cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19
	ldi	(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NOTIFY_RESUME), %r20
	and,COND(<>)	%r19, %r20, %r0
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working. The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=),n 0,%r20,intr_restore /* backward */

	copy	%r0, %r25			/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	copy	%r16, %r26			/* struct pt_regs *regs */

	b,n	intr_check_sig

intr_restore:
	copy            %r16,%r29
	ldo             PT_FR31(%r29),%r1
	rest_fp         %r1
	rest_general    %r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1       %r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop

#ifndef CONFIG_PREEMPT
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPT */

	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2

	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0. otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPT
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	LDREG	TI_PRE_COUNT(%r1), %r19
	cmpib,COND(<>)	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	BL	preempt_schedule_irq, %r2
	nop

	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPT */

	/*
	 * External interrupts.
	 */

intr_extint:
	cmpib,COND(=),n 0,%r16,1f

	get_stack_use_cr30
	b,n 2f

1:
	get_stack_use_r30
2:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24
	
	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC(syscall_exit_rfi)


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY(intr_save)		/* for os_hpmc */
	mfsp    %sr7,%r16
	cmpib,COND(=),n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy    %r8,%r26

1:
	get_stack_use_r30
	copy    %r8,%r26

2:
	save_specials	%r29

	/* If this trap is a itlb miss, skip saving/adjusting isr/ior */

	/*
	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
	 *           traps.c.
	 *        2) Once we start executing code above 4 Gb, we need
	 *           to adjust iasq/iaoq here in the same way we
	 *           adjust isr/ior below.
	 */

	cmpib,COND(=),n        6,%r26,skip_save_ior


	mfctl           %cr20, %r16 /* isr */
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl           %cr21, %r17 /* ior */


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
	depdi           0,1,2,%r17

	/*
	 * FIXME: This code has hardwired assumptions about the split
	 *        between space bits and offset bits. This will change
	 *        when we allow alternate page sizes.
	 */

	/* adjust isr/ior. */
	extrd,u         %r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
	depd            %r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
	depdi           0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
#endif
	STREG           %r16, PT_ISR(%r29)
	STREG           %r17, PT_IOR(%r29)


skip_save_ior:
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r25
	save_fp	%r25
	
	loadgp

	copy	%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
	copy	%r25, %r16	/* save pt_regs */

	b	handle_interruption
	ldo	R%intr_check_sig(%r2), %r2
ENDPROC(intr_save)


	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */

	/*
	 * register number allocations.
	 * Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot
	
	idtlbt          pte,prot

	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot

	rfir
	nop

nadtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate

	idtlbt          pte,prot

	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

dtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot


	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

nadtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt          pte,prot

	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault
	
	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0
	
        idtlbt          pte,prot

	rfir
	nop

nadtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate

	idtlbt          pte,prot

	rfir
	nop

#endif

nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */

	mfctl           %cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi             0x280,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
	BL		get_register,%r25
	extrw,u         %r9,15,5,%r8	/* Get index register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	copy            %r1,%r24
	BL		get_register,%r25
	extrw,u         %r9,10,5,%r8	/* Get base register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

nadtlb_nullify:
	mfctl           %ipsw,%r8
	ldil            L%PSW_N,%r9
	or              %r8,%r9,%r8            /* Set PSW_N */
	mtctl           %r8,%ipsw

	rfir
	nop

	/*
		When there is no translation for the probe address then we
		must nullify the insn and return zero in the target regsiter.
		This will indicate to the calling code that it does not have
		write/read privileges to this address.

		This should technically work for prober and probew in PA 1.1,
		and also probe,r and probe,w in PA 2.0

		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

	*/
nadtlb_probe_check:
	ldi             0x80,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
	BL              get_register,%r25      /* Find the target register */
	extrw,u         %r9,31,5,%r8           /* Get target register */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	copy            %r0,%r1                /* Write zero to target register */
	b nadtlb_nullify                       /* Nullify return insn */
	nop


#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot
	
	iitlbt          pte,prot

	rfir
	nop

naitlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,naitlb_fault

	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	iitlbt          pte,prot

	rfir
	nop

naitlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault

	iitlbt		pte,prot

	rfir
	nop

#else

itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

naitlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

naitlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,itlb_fault

	iitlba          pte,(%sr0, va)
	iitlbp          prot,(%sr0, va)

	rfir
	nop


itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0	

	iitlbt          pte,prot

	rfir
	nop

naitlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep
ptp,pte,t0,va,naitlb_check_alias_2015421543update_ptep ptp,pte,t0,t115441545make_insert_tlb spc,pte,prot15461547f_extend pte,t015481549iitlbt pte,prot15501551rfir1552nop15531554naitlb_check_alias_20:1555do_alias spc,t0,t1,va,pte,prot,naitlb_fault15561557iitlbt pte,prot15581559rfir1560nop15611562#endif15631564#ifdef CONFIG_64BIT15651566dbit_trap_20w:1567space_adjust spc,va,t01568get_pgd spc,ptp1569space_check spc,t0,dbit_fault15701571L3_ptep ptp,pte,t0,va,dbit_fault15721573#ifdef CONFIG_SMP1574cmpib,COND(=),n 0,spc,dbit_nolock_20w1575load32 PA(pa_dbit_lock),t015761577dbit_spin_20w:1578LDCW 0(t0),t11579cmpib,COND(=) 0,t1,dbit_spin_20w1580nop15811582dbit_nolock_20w:1583#endif1584update_dirty ptp,pte,t115851586make_insert_tlb spc,pte,prot15871588idtlbt pte,prot1589#ifdef CONFIG_SMP1590cmpib,COND(=),n 0,spc,dbit_nounlock_20w1591ldi 1,t11592stw t1,0(t0)15931594dbit_nounlock_20w:1595#endif15961597rfir1598nop1599#else16001601dbit_trap_11:16021603get_pgd spc,ptp16041605space_check spc,t0,dbit_fault16061607L2_ptep ptp,pte,t0,va,dbit_fault16081609#ifdef CONFIG_SMP1610cmpib,COND(=),n 0,spc,dbit_nolock_111611load32 PA(pa_dbit_lock),t016121613dbit_spin_11:1614LDCW 0(t0),t11615cmpib,= 0,t1,dbit_spin_111616nop16171618dbit_nolock_11:1619#endif1620update_dirty ptp,pte,t116211622make_insert_tlb_11 spc,pte,prot16231624mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */1625mtsp spc,%sr116261627idtlba pte,(%sr1,va)1628idtlbp prot,(%sr1,va)16291630mtsp t1, %sr1 /* Restore sr1 */1631#ifdef CONFIG_SMP1632cmpib,COND(=),n 0,spc,dbit_nounlock_111633ldi 1,t11634stw t1,0(t0)16351636dbit_nounlock_11:1637#endif16381639rfir1640nop16411642dbit_trap_20:1643get_pgd spc,ptp16441645space_check spc,t0,dbit_fault16461647L2_ptep ptp,pte,t0,va,dbit_fault16481649#ifdef CONFIG_SMP1650cmpib,COND(=),n 0,spc,dbit_nolock_201651load32 PA(pa_dbit_lock),t016521653dbit_spin_20:1654LDCW 0(t0),t11655cmpib,= 0,t1,dbit_spin_201656nop16571658dbit_nolock_20:1659#endif1660update_dirty 
ptp,pte,t116611662make_insert_tlb spc,pte,prot16631664f_extend pte,t116651666idtlbt pte,prot16671668#ifdef CONFIG_SMP1669cmpib,COND(=),n 0,spc,dbit_nounlock_201670ldi 1,t11671stw t1,0(t0)16721673dbit_nounlock_20:1674#endif16751676rfir1677nop1678#endif16791680.import handle_interruption,code16811682kernel_bad_space:1683b intr_save1684ldi 31,%r8 /* Use an unused code */16851686dbit_fault:1687b intr_save1688ldi 20,%r816891690itlb_fault:1691b intr_save1692ldi 6,%r816931694nadtlb_fault:1695b intr_save1696ldi 17,%r816971698naitlb_fault:1699b intr_save1700ldi 16,%r817011702dtlb_fault:1703b intr_save1704ldi 15,%r817051706/* Register saving semantics for system calls:17071708%r1 clobbered by system call macro in userspace1709%r2 saved in PT_REGS by gateway page1710%r3 - %r18 preserved by C code (saved by signal code)1711%r19 - %r20 saved in PT_REGS by gateway page1712%r21 - %r22 non-standard syscall args1713stored in kernel stack by gateway page1714%r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page1715%r27 - %r30 saved in PT_REGS by gateway page1716%r31 syscall return pointer1717*/17181719/* Floating point registers (FIXME: what do we do with these?)17201721%fr0 - %fr3 status/exception, not preserved1722%fr4 - %fr7 arguments1723%fr8 - %fr11 not preserved by C code1724%fr12 - %fr21 preserved by C code1725%fr22 - %fr31 not preserved by C code1726*/17271728.macro reg_save regs1729STREG %r3, PT_GR3(\regs)1730STREG %r4, PT_GR4(\regs)1731STREG %r5, PT_GR5(\regs)1732STREG %r6, PT_GR6(\regs)1733STREG %r7, PT_GR7(\regs)1734STREG %r8, PT_GR8(\regs)1735STREG %r9, PT_GR9(\regs)1736STREG %r10,PT_GR10(\regs)1737STREG %r11,PT_GR11(\regs)1738STREG %r12,PT_GR12(\regs)1739STREG %r13,PT_GR13(\regs)1740STREG %r14,PT_GR14(\regs)1741STREG %r15,PT_GR15(\regs)1742STREG %r16,PT_GR16(\regs)1743STREG %r17,PT_GR17(\regs)1744STREG %r18,PT_GR18(\regs)1745.endm17461747.macro reg_restore regs1748LDREG PT_GR3(\regs), %r31749LDREG PT_GR4(\regs), %r41750LDREG PT_GR5(\regs), %r51751LDREG PT_GR6(\regs), 
%r61752LDREG PT_GR7(\regs), %r71753LDREG PT_GR8(\regs), %r81754LDREG PT_GR9(\regs), %r91755LDREG PT_GR10(\regs),%r101756LDREG PT_GR11(\regs),%r111757LDREG PT_GR12(\regs),%r121758LDREG PT_GR13(\regs),%r131759LDREG PT_GR14(\regs),%r141760LDREG PT_GR15(\regs),%r151761LDREG PT_GR16(\regs),%r161762LDREG PT_GR17(\regs),%r171763LDREG PT_GR18(\regs),%r181764.endm17651766ENTRY(sys_fork_wrapper)1767LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r11768ldo TASK_REGS(%r1),%r11769reg_save %r11770mfctl %cr27, %r31771STREG %r3, PT_CR27(%r1)17721773STREG %r2,-RP_OFFSET(%r30)1774ldo FRAME_SIZE(%r30),%r301775#ifdef CONFIG_64BIT1776ldo -16(%r30),%r29 /* Reference param save area */1777#endif17781779/* These are call-clobbered registers and therefore1780also syscall-clobbered (we hope). */1781STREG %r2,PT_GR19(%r1) /* save for child */1782STREG %r30,PT_GR21(%r1)17831784LDREG PT_GR30(%r1),%r251785copy %r1,%r241786BL sys_clone,%r21787ldi SIGCHLD,%r2617881789LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r21790wrapper_exit:1791ldo -FRAME_SIZE(%r30),%r30 /* get the stackframe */1792LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r11793ldo TASK_REGS(%r1),%r1 /* get pt regs */17941795LDREG PT_CR27(%r1), %r31796mtctl %r3, %cr271797reg_restore %r117981799/* strace expects syscall # to be preserved in r20 */1800ldi __NR_fork,%r201801bv %r0(%r2)1802STREG %r20,PT_GR20(%r1)1803ENDPROC(sys_fork_wrapper)18041805/* Set the return value for the child */1806ENTRY(child_return)1807BL schedule_tail, %r21808nop18091810LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r11811LDREG TASK_PT_GR19(%r1),%r21812b wrapper_exit1813copy %r0,%r281814ENDPROC(child_return)181518161817ENTRY(sys_clone_wrapper)1818LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r11819ldo TASK_REGS(%r1),%r1 /* get pt regs */1820reg_save %r11821mfctl %cr27, %r31822STREG %r3, PT_CR27(%r1)18231824STREG %r2,-RP_OFFSET(%r30)1825ldo FRAME_SIZE(%r30),%r301826#ifdef CONFIG_64BIT1827ldo -16(%r30),%r29 /* Reference param save area */1828#endif18291830/* 
WARNING - Clobbers r19 and r21, userspace must save these! */1831STREG %r2,PT_GR19(%r1) /* save for child */1832STREG %r30,PT_GR21(%r1)1833BL sys_clone,%r21834copy %r1,%r2418351836b wrapper_exit1837LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r21838ENDPROC(sys_clone_wrapper)183918401841ENTRY(sys_vfork_wrapper)1842LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r11843ldo TASK_REGS(%r1),%r1 /* get pt regs */1844reg_save %r11845mfctl %cr27, %r31846STREG %r3, PT_CR27(%r1)18471848STREG %r2,-RP_OFFSET(%r30)1849ldo FRAME_SIZE(%r30),%r301850#ifdef CONFIG_64BIT1851ldo -16(%r30),%r29 /* Reference param save area */1852#endif18531854STREG %r2,PT_GR19(%r1) /* save for child */1855STREG %r30,PT_GR21(%r1)18561857BL sys_vfork,%r21858copy %r1,%r2618591860b wrapper_exit1861LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r21862ENDPROC(sys_vfork_wrapper)186318641865.macro execve_wrapper execve1866LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r11867ldo TASK_REGS(%r1),%r1 /* get pt regs */18681869/*1870* Do we need to save/restore r3-r18 here?1871* I don't think so. why would new thread need old1872* threads registers?1873*/18741875/* %arg0 - %arg3 are already saved for us. 
*/18761877STREG %r2,-RP_OFFSET(%r30)1878ldo FRAME_SIZE(%r30),%r301879#ifdef CONFIG_64BIT1880ldo -16(%r30),%r29 /* Reference param save area */1881#endif1882BL \execve,%r21883copy %r1,%arg018841885ldo -FRAME_SIZE(%r30),%r301886LDREG -RP_OFFSET(%r30),%r218871888/* If exec succeeded we need to load the args */18891890ldo -1024(%r0),%r11891cmpb,>>= %r28,%r1,error_\execve1892copy %r2,%r1918931894error_\execve:1895bv %r0(%r19)1896nop1897.endm18981899.import sys_execve1900ENTRY(sys_execve_wrapper)1901execve_wrapper sys_execve1902ENDPROC(sys_execve_wrapper)19031904#ifdef CONFIG_64BIT1905.import sys32_execve1906ENTRY(sys32_execve_wrapper)1907execve_wrapper sys32_execve1908ENDPROC(sys32_execve_wrapper)1909#endif19101911ENTRY(sys_rt_sigreturn_wrapper)1912LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r261913ldo TASK_REGS(%r26),%r26 /* get pt regs */1914/* Don't save regs, we are going to restore them from sigcontext. */1915STREG %r2, -RP_OFFSET(%r30)1916#ifdef CONFIG_64BIT1917ldo FRAME_SIZE(%r30), %r301918BL sys_rt_sigreturn,%r21919ldo -16(%r30),%r29 /* Reference param save area */1920#else1921BL sys_rt_sigreturn,%r21922ldo FRAME_SIZE(%r30), %r301923#endif19241925ldo -FRAME_SIZE(%r30), %r301926LDREG -RP_OFFSET(%r30), %r219271928/* FIXME: I think we need to restore a few more things here. 
*/1929LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r11930ldo TASK_REGS(%r1),%r1 /* get pt regs */1931reg_restore %r119321933/* If the signal was received while the process was blocked on a1934* syscall, then r2 will take us to syscall_exit; otherwise r2 will1935* take us to syscall_exit_rfi and on to intr_return.1936*/1937bv %r0(%r2)1938LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */1939ENDPROC(sys_rt_sigreturn_wrapper)19401941ENTRY(sys_sigaltstack_wrapper)1942/* Get the user stack pointer */1943LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r11944ldo TASK_REGS(%r1),%r24 /* get pt regs */1945LDREG TASK_PT_GR30(%r24),%r241946STREG %r2, -RP_OFFSET(%r30)1947#ifdef CONFIG_64BIT1948ldo FRAME_SIZE(%r30), %r301949BL do_sigaltstack,%r21950ldo -16(%r30),%r29 /* Reference param save area */1951#else1952BL do_sigaltstack,%r21953ldo FRAME_SIZE(%r30), %r301954#endif19551956ldo -FRAME_SIZE(%r30), %r301957LDREG -RP_OFFSET(%r30), %r21958bv %r0(%r2)1959nop1960ENDPROC(sys_sigaltstack_wrapper)19611962#ifdef CONFIG_64BIT1963ENTRY(sys32_sigaltstack_wrapper)1964/* Get the user stack pointer */1965LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r241966LDREG TASK_PT_GR30(%r24),%r241967STREG %r2, -RP_OFFSET(%r30)1968ldo FRAME_SIZE(%r30), %r301969BL do_sigaltstack32,%r21970ldo -16(%r30),%r29 /* Reference param save area */19711972ldo -FRAME_SIZE(%r30), %r301973LDREG -RP_OFFSET(%r30), %r21974bv %r0(%r2)1975nop1976ENDPROC(sys32_sigaltstack_wrapper)1977#endif19781979ENTRY(syscall_exit)1980/* NOTE: HP-UX syscalls also come through here1981* after hpux_syscall_exit fixes up return1982* values. */19831984/* NOTE: Not all syscalls exit this way. 
rt_sigreturn will exit1985* via syscall_exit_rfi if the signal was received while the process1986* was running.1987*/19881989/* save return value now */19901991mfctl %cr30, %r11992LDREG TI_TASK(%r1),%r11993STREG %r28,TASK_PT_GR28(%r1)19941995#ifdef CONFIG_HPUX1996/* <linux/personality.h> cannot be easily included */1997#define PER_HPUX 0x101998ldw TASK_PERSONALITY(%r1),%r1919992000/* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */2001ldo -PER_HPUX(%r19), %r192002cmpib,COND(<>),n 0,%r19,1f20032004/* Save other hpux returns if personality is PER_HPUX */2005STREG %r22,TASK_PT_GR22(%r1)2006STREG %r29,TASK_PT_GR29(%r1)20071:20082009#endif /* CONFIG_HPUX */20102011/* Seems to me that dp could be wrong here, if the syscall involved2012* calling a module, and nothing got round to restoring dp on return.2013*/2014loadgp20152016syscall_check_resched:20172018/* check for reschedule */20192020LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */2021bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */20222023.import do_signal,code2024syscall_check_sig:2025LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r192026ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r262027and,COND(<>) %r19, %r26, %r02028b,n syscall_restore /* skip past if we've nothing to do */20292030syscall_do_signal:2031/* Save callee-save registers (for sigcontext).2032* FIXME: After this point the process structure should be2033* consistent with all the relevant state of the process2034* before the syscall. 
We need to verify this.2035*/2036LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r12037ldo TASK_REGS(%r1), %r26 /* struct pt_regs *regs */2038reg_save %r2620392040#ifdef CONFIG_64BIT2041ldo -16(%r30),%r29 /* Reference param save area */2042#endif20432044BL do_notify_resume,%r22045ldi 1, %r25 /* long in_syscall = 1 */20462047LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r12048ldo TASK_REGS(%r1), %r20 /* reload pt_regs */2049reg_restore %r2020502051b,n syscall_check_sig20522053syscall_restore:2054LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r120552056/* Are we being ptraced? */2057ldw TASK_FLAGS(%r1),%r192058ldi (_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r22059and,COND(=) %r19,%r2,%r02060b,n syscall_restore_rfi20612062ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */2063rest_fp %r1920642065LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */2066mtsar %r1920672068LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */2069LDREG TASK_PT_GR19(%r1),%r192070LDREG TASK_PT_GR20(%r1),%r202071LDREG TASK_PT_GR21(%r1),%r212072LDREG TASK_PT_GR22(%r1),%r222073LDREG TASK_PT_GR23(%r1),%r232074LDREG TASK_PT_GR24(%r1),%r242075LDREG TASK_PT_GR25(%r1),%r252076LDREG TASK_PT_GR26(%r1),%r262077LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */2078LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */2079LDREG TASK_PT_GR29(%r1),%r292080LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */20812082/* NOTE: We use rsm/ssm pair to make this operation atomic */2083LDREG TASK_PT_GR30(%r1),%r1 /* Get user sp */2084rsm PSW_SM_I, %r02085copy %r1,%r30 /* Restore user sp */2086mfsp %sr3,%r1 /* Get user space id */2087mtsp %r1,%sr7 /* Restore sr7 */2088ssm PSW_SM_I, %r020892090/* Set sr2 to zero for userspace syscalls to work. */2091mtsp %r0,%sr22092mtsp %r1,%sr4 /* Restore sr4 */2093mtsp %r1,%sr5 /* Restore sr5 */2094mtsp %r1,%sr6 /* Restore sr6 */20952096depi 3,31,2,%r31 /* ensure return to user mode. 
*/20972098#ifdef CONFIG_64BIT2099/* decide whether to reset the wide mode bit2100*2101* For a syscall, the W bit is stored in the lowest bit2102* of sp. Extract it and reset W if it is zero */2103extrd,u,*<> %r30,63,1,%r12104rsm PSW_SM_W, %r02105/* now reset the lowest bit of sp if it was set */2106xor %r30,%r1,%r302107#endif2108be,n 0(%sr3,%r31) /* return to user space */21092110/* We have to return via an RFI, so that PSW T and R bits can be set2111* appropriately.2112* This sets up pt_regs so we can return via intr_restore, which is not2113* the most efficient way of doing things, but it works.2114*/2115syscall_restore_rfi:2116ldo -1(%r0),%r2 /* Set recovery cntr to -1 */2117mtctl %r2,%cr0 /* for immediate trap */2118LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */2119ldi 0x0b,%r20 /* Create new PSW */2120depi -1,13,1,%r20 /* C, Q, D, and I bits */21212122/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are2123* set in thread_info.h and converted to PA bitmap2124* numbers in asm-offsets.c */21252126/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */2127extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r02128depi -1,27,1,%r20 /* R bit */21292130/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */2131extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r02132depi -1,7,1,%r20 /* T bit */21332134STREG %r20,TASK_PT_PSW(%r1)21352136/* Always store space registers, since sr3 can be changed (e.g. fork) */21372138mfsp %sr3,%r252139STREG %r25,TASK_PT_SR3(%r1)2140STREG %r25,TASK_PT_SR4(%r1)2141STREG %r25,TASK_PT_SR5(%r1)2142STREG %r25,TASK_PT_SR6(%r1)2143STREG %r25,TASK_PT_SR7(%r1)2144STREG %r25,TASK_PT_IASQ0(%r1)2145STREG %r25,TASK_PT_IASQ1(%r1)21462147/* XXX W bit??? */2148/* Now if old D bit is clear, it means we didn't save all registers2149* on syscall entry, so do that now. 
This only happens on TRACEME2150* calls, or if someone attached to us while we were on a syscall.2151* We could make this more efficient by not saving r3-r18, but2152* then we wouldn't be able to use the common intr_restore path.2153* It is only for traced processes anyway, so performance is not2154* an issue.2155*/2156bb,< %r2,30,pt_regs_ok /* Branch if D set */2157ldo TASK_REGS(%r1),%r252158reg_save %r25 /* Save r3 to r18 */21592160/* Save the current sr */2161mfsp %sr0,%r22162STREG %r2,TASK_PT_SR0(%r1)21632164/* Save the scratch sr */2165mfsp %sr1,%r22166STREG %r2,TASK_PT_SR1(%r1)21672168/* sr2 should be set to zero for userspace syscalls */2169STREG %r0,TASK_PT_SR2(%r1)21702171pt_regs_ok:2172LDREG TASK_PT_GR31(%r1),%r22173depi 3,31,2,%r2 /* ensure return to user mode. */2174STREG %r2,TASK_PT_IAOQ0(%r1)2175ldo 4(%r2),%r22176STREG %r2,TASK_PT_IAOQ1(%r1)2177copy %r25,%r162178b intr_restore2179nop21802181.import schedule,code2182syscall_do_resched:2183BL schedule,%r22184#ifdef CONFIG_64BIT2185ldo -16(%r30),%r29 /* Reference param save area */2186#else2187nop2188#endif2189b syscall_check_resched /* if resched, we start over again */2190nop2191ENDPROC(syscall_exit)219221932194#ifdef CONFIG_FUNCTION_TRACER2195.import ftrace_function_trampoline,code2196ENTRY(_mcount)2197copy %r3, %arg22198b ftrace_function_trampoline2199nop2200ENDPROC(_mcount)22012202ENTRY(return_to_handler)2203load32 return_trampoline, %rp2204copy %ret0, %arg02205copy %ret1, %arg12206b ftrace_return_to_handler2207nop2208return_trampoline:2209copy %ret0, %rp2210copy %r23, %ret02211copy %r24, %ret122122213.globl ftrace_stub2214ftrace_stub:2215bv %r0(%rp)2216nop2217ENDPROC(return_to_handler)2218#endif /* CONFIG_FUNCTION_TRACER */221922202221get_register:2222/*2223* get_register is used by the non access tlb miss handlers to2224* copy the value of the general register specified in r8 into2225* r1. This routine can't be used for shadowed registers, since2226* the rfir will restore the original value. 
So, for the shadowed2227* registers we put a -1 into r1 to indicate that the register2228* should not be used (the register being copied could also have2229* a -1 in it, but that is OK, it just means that we will have2230* to use the slow path instead).2231*/2232blr %r8,%r02233nop2234bv %r0(%r25) /* r0 */2235copy %r0,%r12236bv %r0(%r25) /* r1 - shadowed */2237ldi -1,%r12238bv %r0(%r25) /* r2 */2239copy %r2,%r12240bv %r0(%r25) /* r3 */2241copy %r3,%r12242bv %r0(%r25) /* r4 */2243copy %r4,%r12244bv %r0(%r25) /* r5 */2245copy %r5,%r12246bv %r0(%r25) /* r6 */2247copy %r6,%r12248bv %r0(%r25) /* r7 */2249copy %r7,%r12250bv %r0(%r25) /* r8 - shadowed */2251ldi -1,%r12252bv %r0(%r25) /* r9 - shadowed */2253ldi -1,%r12254bv %r0(%r25) /* r10 */2255copy %r10,%r12256bv %r0(%r25) /* r11 */2257copy %r11,%r12258bv %r0(%r25) /* r12 */2259copy %r12,%r12260bv %r0(%r25) /* r13 */2261copy %r13,%r12262bv %r0(%r25) /* r14 */2263copy %r14,%r12264bv %r0(%r25) /* r15 */2265copy %r15,%r12266bv %r0(%r25) /* r16 - shadowed */2267ldi -1,%r12268bv %r0(%r25) /* r17 - shadowed */2269ldi -1,%r12270bv %r0(%r25) /* r18 */2271copy %r18,%r12272bv %r0(%r25) /* r19 */2273copy %r19,%r12274bv %r0(%r25) /* r20 */2275copy %r20,%r12276bv %r0(%r25) /* r21 */2277copy %r21,%r12278bv %r0(%r25) /* r22 */2279copy %r22,%r12280bv %r0(%r25) /* r23 */2281copy %r23,%r12282bv %r0(%r25) /* r24 - shadowed */2283ldi -1,%r12284bv %r0(%r25) /* r25 - shadowed */2285ldi -1,%r12286bv %r0(%r25) /* r26 */2287copy %r26,%r12288bv %r0(%r25) /* r27 */2289copy %r27,%r12290bv %r0(%r25) /* r28 */2291copy %r28,%r12292bv %r0(%r25) /* r29 */2293copy %r29,%r12294bv %r0(%r25) /* r30 */2295copy %r30,%r12296bv %r0(%r25) /* r31 */2297copy %r31,%r1229822992300set_register:2301/*2302* set_register is used by the non access tlb miss handlers to2303* copy the value of r1 into the general register specified in2304* r8.2305*/2306blr %r8,%r02307nop2308bv %r0(%r25) /* r0 (silly, but it is a place holder) */2309copy %r1,%r02310bv %r0(%r25) /* r1 
*/2311copy %r1,%r12312bv %r0(%r25) /* r2 */2313copy %r1,%r22314bv %r0(%r25) /* r3 */2315copy %r1,%r32316bv %r0(%r25) /* r4 */2317copy %r1,%r42318bv %r0(%r25) /* r5 */2319copy %r1,%r52320bv %r0(%r25) /* r6 */2321copy %r1,%r62322bv %r0(%r25) /* r7 */2323copy %r1,%r72324bv %r0(%r25) /* r8 */2325copy %r1,%r82326bv %r0(%r25) /* r9 */2327copy %r1,%r92328bv %r0(%r25) /* r10 */2329copy %r1,%r102330bv %r0(%r25) /* r11 */2331copy %r1,%r112332bv %r0(%r25) /* r12 */2333copy %r1,%r122334bv %r0(%r25) /* r13 */2335copy %r1,%r132336bv %r0(%r25) /* r14 */2337copy %r1,%r142338bv %r0(%r25) /* r15 */2339copy %r1,%r152340bv %r0(%r25) /* r16 */2341copy %r1,%r162342bv %r0(%r25) /* r17 */2343copy %r1,%r172344bv %r0(%r25) /* r18 */2345copy %r1,%r182346bv %r0(%r25) /* r19 */2347copy %r1,%r192348bv %r0(%r25) /* r20 */2349copy %r1,%r202350bv %r0(%r25) /* r21 */2351copy %r1,%r212352bv %r0(%r25) /* r22 */2353copy %r1,%r222354bv %r0(%r25) /* r23 */2355copy %r1,%r232356bv %r0(%r25) /* r24 */2357copy %r1,%r242358bv %r0(%r25) /* r25 */2359copy %r1,%r252360bv %r0(%r25) /* r26 */2361copy %r1,%r262362bv %r0(%r25) /* r27 */2363copy %r1,%r272364bv %r0(%r25) /* r28 */2365copy %r1,%r282366bv %r0(%r25) /* r29 */2367copy %r1,%r292368bv %r0(%r25) /* r30 */2369copy %r1,%r302370bv %r0(%r25) /* r31 */2371copy %r1,%r312372237323742375