@ ---------------------------------------------------------------------------
@ Low-level debug output primitives used by the decompressor's putc/puts.
@ Contract: writeb emits one character in \ch to the debug channel whose
@ base address (if any) is in \rb; loadsp loads that base address into \rb,
@ using \tmp as scratch.
@
@ NOTE(review): several alternative definitions appear back-to-back below.
@ In the upstream file each pair is selected by preprocessor conditionals
@ (DEBUG / CONFIG_CPU_* / CONFIG_DEBUG_* variants) that were stripped during
@ extraction; assembled as-is, later .macro definitions would clash with
@ earlier ones. Confirm against the original #ifdef structure.
@ ---------------------------------------------------------------------------
.macro loadsp, rb, tmp
.endm
@ Variant 1: character out via a CP14 coprocessor register (debug comms
@ channel style); no UART base needed, hence the empty loadsp above.
.macro writeb, ch, rb
mcr p14, 0, \ch, c0, c5, 0
.endm
.macro loadsp, rb, tmp
.endm
@ Variant 2: CP14 output using a different register encoding (c8,c0).
.macro writeb, ch, rb
mcr p14, 0, \ch, c8, c0, 0
.endm
.macro loadsp, rb, tmp
.endm
@ Variant 3: CP14 output using encoding (c1,c0).
.macro writeb, ch, rb
mcr p14, 0, \ch, c1, c0, 0
.endm
@ Variant 4: MMIO UART output; defers to a platform-provided senduart macro
@ with the UART base in \rb (paired with the loadsp variants below).
.macro writeb, ch, rb
senduart \ch, \rb
.endm
@ UART-base loaders. The immediate operands (base address pieces) were lost
@ in extraction — each trailing comma originally carried a #constant.
.macro loadsp, rb, tmp
mov \rb,
add \rb, \rb,
add \rb, \rb,
.endm
.macro loadsp, rb, tmp
mov \rb,
add \rb, \rb,
.endm
@ Generic variant: platform addruart macro supplies the UART base.
.macro loadsp, rb, tmp
addruart \rb, \tmp
.endm
@ kputc: print the single character \val via putc.
@ Clobbers r0 (argument register for putc) plus whatever putc clobbers.
.macro kputc,val
mov r0, \val
bl putc
.endm
@ kphex: print \val as a hex number of \len digits via phex.
@ NOTE(review): the mov r1 immediate (originally #\len) was lost in
@ extraction — r1 is phex's digit-count argument.
.macro kphex,val,len
mov r0, \val
mov r1,
bl phex
.endm
@ debug_reloc_start: dump the key relocation registers (r6, r7, control reg,
@ r5, r9, r4) as 8-digit hex values before the relocation copy, for
@ debugging the decompressor. The bare "kputc" lines originally carried a
@ character immediate (separator/newline) that was lost in extraction.
.macro debug_reloc_start
kputc
kphex r6, 8
kputc
kphex r7, 8
kputc
mrc p15, 0, r0, c1, c0 @ read CP15 control register into r0 for display
kphex r0, 8
kputc
kphex r5, 8
kputc
kphex r9, 8
kputc
kphex r4, 8
kputc
.endm
@ debug_reloc_end: after relocation, print r5 and hex-dump memory starting
@ at the address in r4 (memdump takes its base in r0).
.macro debug_reloc_end
kphex r5, 8
kputc
mov r0, r4
bl memdump
.endm
@ ---------------------------------------------------------------------------
@ zImage entry point. Boot contract on entry: r1 = machine/architecture ID,
@ r2 = ATAGs pointer; MMU off. The section flags after the comma on the
@ .section line were lost in extraction (originally alloc+execinstr).
@ ---------------------------------------------------------------------------
.section ".start",
.align
.arm @ Always enter in ARM state
start:
.type start, @ NOTE(review): symbol type operand (originally %function) lost
@ Eight NOPs total (7 + 1): landing pad so old bootloaders that jump a few
@ words in still hit executable code.
.rept 7
mov r0, r0
.endr
ARM( mov r0, r0 )
ARM( b 1f )
THUMB( adr r12, BSYM(1f) ) @ Thumb build: switch to Thumb via bx
THUMB( bx r12 )
.word 0x016f2818 @ Magic numbers to help the loader
.word start @ absolute load/run zImage address
.word _edata @ zImage end address
THUMB( .thumb )
1: mov r7, r1 @ save architecture ID
mov r8, r2 @ save atags pointer
@ If entered in a privileged mode we are probably running under an Angel
@ debug monitor; if in user mode, ask Angel to enter SVC mode for us.
@ The tst/mov immediates (mode-bit mask, Angel reason code) were stripped.
mrs r2, cpsr @ get current mode
tst r2,
bne not_angel
mov r0,
ARM( swi 0x123456 ) @ angel_SWI_ARM
THUMB( svc 0xab ) @ angel_SWI_THUMB
not_angel:
mrs r2, cpsr @ turn off interrupts to
orr r2, r2, @ prevent angel from running (IRQ/FIQ mask bits stripped)
msr cpsr_c, r2
@ teqp is the legacy 26-bit-architecture way of changing mode/flags;
@ in the upstream file this sits in the non-CPSR (#else) branch.
teqp pc,
.text
@ determine final kernel image address
@ Either compute zreladdr from the current PC (auto mode) or take the
@ linker-provided zreladdr; in upstream these are alternative #ifdef paths.
@ The and/add immediates (alignment mask, TEXT_OFFSET) were stripped.
mov r4, pc
and r4, r4,
add r4, r4,
ldr r4, =zreladdr
bl cache_on
@ ---------------------------------------------------------------------------
@ restart: decide whether the decompressed kernel at r4 would overwrite this
@ running copy of the decompressor; if so, copy ourselves above the danger
@ zone and re-enter. All addressing is delta-based (r0 = runtime - link
@ offset) because we may be running at any load address.
@ ---------------------------------------------------------------------------
restart: adr r0, LC0
ldmia r0, {r1, r2, r3, r6, r10, r11, r12} @ load the LC0 address table
ldr sp, [r0, @ NOTE(review): offset to sp slot in LC0 stripped
sub r0, r0, r1 @ calculate the delta offset
add r6, r6, r0 @ _edata
add r10, r10, r0 @ inflated kernel size location
@ Assemble the little-endian 32-bit inflated size from four byte loads
@ (the [r10, #N] byte offsets and lsl shift amounts were stripped).
ldrb r9, [r10,
ldrb lr, [r10,
orr r9, r9, lr, lsl
ldrb lr, [r10,
ldrb r10, [r10,
orr r9, r9, lr, lsl
orr r9, r9, r10, lsl
add sp, sp, r0 @ relocate the stack pointer by the same delta
add r10, sp, @ end of this image incl. malloc space (immediate stripped)
mov r10, r6 @ alternative (#ifdef) path: end = _edata
add r10, r10,
@ If the final kernel address is at or above our end, nothing can clash.
cmp r4, r10
bhs wont_overwrite
@ If the decompressed kernel (r4 .. r4+r9) ends below our current PC,
@ it also cannot clash.
add r10, r4, r9
ARM( cmp r10, pc )
THUMB( mov lr, pc ) @ Thumb2: cmp against pc not encodable; go via lr
THUMB( cmp r10, lr )
bls wont_overwrite
@ Overlap: relocate this code above the decompression destination.
@ Round r10 up and r5 down to a safe boundary (immediates stripped).
add r10, r10,
bic r10, r10,
adr r5, restart
bic r5, r5,
sub r9, r6, r5 @ size to copy
add r9, r9, @ round size up (immediate stripped)
bic r9, r9,
add r6, r9, r5 @ source end
add r9, r9, r10 @ destination end
@ Copy backwards, 8 words per iteration, from end toward start.
1: ldmdb r6!, {r0 - r3, r10 - r12, lr}
cmp r6, r5
stmdb r9!, {r0 - r3, r10 - r12, lr}
bhi 1b
sub r6, r9, r6 @ r6 = relocation offset of the new copy
add sp, sp, r6 @ move the stack along with the code
bl cache_clean_flush @ make the copied code visible to the I-side
adr r0, BSYM(restart)
add r0, r0, r6
mov pc, r0 @ jump into the relocated copy at restart
@ ---------------------------------------------------------------------------
@ wont_overwrite: the image is in a safe place. If we are not running at the
@ linked address (r0 = delta != 0), fix up the GOT so C code works, then
@ clear BSS, call the C decompressor, and finally jump to the kernel.
@ On entry: r0 = delta, r2 = BSS start, r3 = BSS end, r4 = kernel address,
@ r7 = architecture ID, r8 = atags, r11 = GOT start, r12 = GOT end.
@ ---------------------------------------------------------------------------
wont_overwrite:
teq r0, @ delta == 0? (immediate #0 stripped in extraction)
beq not_relocated
add r11, r11, r0 @ relocate the GOT pointers themselves
add r12, r12, r0
add r2, r2, r0 @ relocate BSS bounds
add r3, r3, r0
@ Simple GOT fixup path: add the delta to every GOT entry.
@ (The post-index/offset immediates on ldr/str were stripped.)
1: ldr r1, [r11,
add r1, r1, r0 @ table. This fixes up the
str r1, [r11],
cmp r11, r12
blo 1b
@ Alternative (#ifdef) GOT fixup path: skip entries that point into BSS,
@ since BSS is cleared below and those entries are resolved differently.
1: ldr r1, [r11,
cmp r1, r2 @ entry < bss_start ||
cmphs r3, r1 @ _end < entry
addlo r1, r1, r0 @ table. This fixes up the
str r1, [r11],
cmp r11, r12
blo 1b
@ Clear BSS, four words per iteration (str post-increments stripped).
not_relocated: mov r0,
1: str r0, [r2],
str r0, [r2],
str r0, [r2],
str r0, [r2],
cmp r2, r3
blo 1b
@ C call: decompress_kernel(output=r4, free_mem_start=sp,
@ free_mem_end=sp+N, arch_id=r7). The add immediate (malloc size) stripped.
mov r0, r4
mov r1, sp @ malloc space above stack
add r2, sp,
mov r3, r7
bl decompress_kernel
bl cache_clean_flush @ push decompressed kernel out of D-cache
bl cache_off @ kernel expects MMU/caches off
mov r0, @ boot protocol: r0 = 0 (immediate stripped)
mov r1, r7 @ restore architecture number
mov r2, r8 @ restore atags pointer
mov pc, r4 @ call kernel
@ Address table loaded by restart's ldmia; the comments name the register
@ each word lands in. Being link-time addresses, subtracting LC0's own
@ runtime address yields the load delta.
.align 2
.type LC0, @ NOTE(review): symbol type operand (originally %object) lost
LC0: .word LC0 @ r1
.word __bss_start @ r2
.word _end @ r3
.word _edata @ r6
.word input_data_end - 4 @ r10 (inflated size location)
.word _got_start @ r11
.word _got_end @ ip
.word .L_user_stack_end @ sp
.size LC0, . - LC0
@ params: returns the physical address of the board parameter block
@ (hard-wired for Acorn RiscPC); only built for that platform upstream.
.globl params
params: ldr r0, =0x10000100 @ params_phys for RPC
mov pc, lr
.ltorg
.align
cache_on: mov r3,
b call_cache_fn
__armv4_mpu_cache_on:
mov r0,
mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting
mcr p15, 0, r0, c6, c7, 1
mov r0,
mcr p15, 0, r0, c2, c0, 0 @ D-cache on
mcr p15, 0, r0, c2, c0, 1 @ I-cache on
mcr p15, 0, r0, c3, c0, 0 @ write-buffer on
mov r0,
mcr p15, 0, r0, c5, c0, 1 @ I-access permission
mcr p15, 0, r0, c5, c0, 0 @ D-access permission
mov r0,
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache
mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache
mrc p15, 0, r0, c1, c0, 0 @ read control reg
@ ...I .... ..D. WC.M
orr r0, r0,
orr r0, r0,
mcr p15, 0, r0, c1, c0, 0 @ write control reg
mov r0,
mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache
mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache
mov pc, lr
__armv3_mpu_cache_on:
mov r0,
mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting
mov r0,
mcr p15, 0, r0, c2, c0, 0 @ cache on
mcr p15, 0, r0, c3, c0, 0 @ write-buffer on
mov r0,
mcr p15, 0, r0, c5, c0, 0 @ access permission
mov r0,
mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
mrc p15, 0, r0, c1, c0, 0 @ read control reg
@ .... .... .... WC.M
orr r0, r0,
mov r0,
mcr p15, 0, r0, c1, c0, 0 @ write control reg
mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
mov pc, lr
@ ---------------------------------------------------------------------------
@ __setup_mmu: build a flat (identity-mapped) section page table just below
@ the kernel address in r4, marking the 256MB around the current RAM as
@ cacheable/bufferable and everything else uncached, then map this
@ decompressor's own code cacheable so it can run fast.
@ In: r4 = kernel execution address. Out: r3 = page table base (later
@ loaded into TTBR by the cache-on handlers). Clobbers r0-r2, r9, r10.
@ All immediates (table size/alignment masks, section flags, 1MB step,
@ shift amounts) were lost in extraction.
@ ---------------------------------------------------------------------------
__setup_mmu: sub r3, r4, @ page table sits below the kernel image
bic r3, r3, @ align the table base down
bic r3, r3,
mov r0, r3 @ r0 walks the 4096 section entries
mov r9, r0, lsr @ r9/r10 = start/end of the assumed RAM window
mov r9, r9, lsl
add r10, r9,
mov r1, @ r1 = section descriptor template
orr r1, r1,
add r2, r3, @ r2 = end of the page table
@ Fill all entries; turn on cacheable/bufferable only inside [r9, r10).
1: cmp r1, r9 @ if virt > start of RAM
orrhs r1, r1, @ set cacheable, bufferable flags
orrhs r1, r1,
cmp r1, r10 @ if virt > end of RAM
bichs r1, r1, @ clear cacheable, bufferable outside RAM
str r1, [r0],
add r1, r1, @ advance descriptor by one section (1MB)
teq r0, r2
bne 1b
@ Ensure the two sections covering this code are cacheable regardless of
@ the RAM window guess, so the decompressor itself runs cached.
mov r1,
orr r1, r1,
mov r2, pc
mov r2, r2, lsr @ r2 = current section number
orr r1, r1, r2, lsl
add r0, r3, r2, lsl @ r0 = this section's table slot
str r1, [r0],
add r1, r1,
str r1, [r0] @ also map the following section
mov pc, lr
ENDPROC(__setup_mmu)
@ ARM926EJ-S entry: an extra CP15 write (clock/test config) before falling
@ through to the generic ARMv4 MMU enable. The mov immediate was stripped.
__arm926ejs_mmu_cache_on:
mov r0,
mcr p15, 7, r0, c15, c0, 0
@ ARMv4 MMU cache enable: build page tables, drain/flush, set the cache +
@ MMU enable bits (via __common_mmu_cache_on), then flush TLBs again.
@ lr is preserved in r12 across the nested bl calls.
__armv4_mmu_cache_on:
mov r12, lr
bl __setup_mmu
mov r0,
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
mrc p15, 0, r0, c1, c0, 0 @ read control reg
orr r0, r0, @ control-register enable bits (immediates stripped)
orr r0, r0,
orr r0, r0,
bl __common_mmu_cache_on
mov r0,
mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
mov pc, r12
@ ARMv7 cache enable: may run on a CPU with no MMU (checked via ID_MMFR0),
@ in which case the page-table and TLB steps are skipped (the ...ne
@ conditionals below hang off the tst results; tst masks were stripped).
__armv7_mmu_cache_on:
mov r12, lr
mrc p15, 0, r11, c0, c1, 4 @ read ID_MMFR0
tst r11, @ VMSA supported?
blne __setup_mmu
mov r0,
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
tst r11,
mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
mrc p15, 0, r0, c1, c0, 0 @ read control reg
orr r0, r0, @ enable bits (stripped): caches, branch pred., etc.
orr r0, r0,
orr r0, r0,
orrne r0, r0, @ MMU enable only when a VMSA MMU exists
movne r1, @ domain access value (stripped)
mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer
mcrne p15, 0, r1, c3, c0, 0 @ load domain access control
mcr p15, 0, r0, c1, c0, 0 @ load control register
mrc p15, 0, r0, c1, c0, 0 @ and read it back
mov r0,
mcr p15, 0, r0, c7, c5, 4 @ ISB
mov pc, r12
@ FA526 cache enable: invalidate everything, then enable via the common
@ path. Same r12=lr convention as the other MMU-on handlers.
__fa526_cache_on:
mov r12, lr
bl __setup_mmu
mov r0,
mcr p15, 0, r0, c7, c7, 0 @ Invalidate whole cache
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c8, c7, 0 @ flush UTLB
mrc p15, 0, r0, c1, c0, 0 @ read control reg
orr r0, r0, @ cache enable bit (immediate stripped)
bl __common_mmu_cache_on
mov r0,
mcr p15, 0, r0, c8, c7, 0 @ flush UTLB
mov pc, r12
@ ARM6 (v3 MMU) cache enable: v3-style whole-cache/TLB invalidate, then a
@ fixed control value (stripped) through the common path.
__arm6_mmu_cache_on:
mov r12, lr
bl __setup_mmu
mov r0,
mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
mcr p15, 0, r0, c5, c0, 0 @ invalidate whole TLB v3
mov r0,
bl __common_mmu_cache_on
mov r0,
mcr p15, 0, r0, c5, c0, 0 @ invalidate whole TLB v3
mov pc, r12
@ Common tail: load TTBR (r3 from __setup_mmu) and domain register, then
@ write the control register from a cache-line-aligned location so the
@ instruction fetch stream survives the MMU turning on. The final
@ "sub pc, lr, r0, lsr #32" idiom (shift stripped) both returns and
@ creates a dependency on the control-register read-back.
__common_mmu_cache_on:
orr r0, r0, @ write buffer enable etc. (immediate stripped)
mov r1, @ domain access control value (stripped)
mcr p15, 0, r3, c2, c0, 0 @ load page table pointer
mcr p15, 0, r1, c3, c0, 0 @ load domain access control
b 1f
.align 5 @ cache line aligned
1: mcr p15, 0, r0, c1, c0, 0 @ load control register
mrc p15, 0, r0, c1, c0, 0 @ and read it back to
sub pc, lr, r0, lsr @ ensure completion, then return
@ ---------------------------------------------------------------------------
@ call_cache_fn: walk the proc_types table matching (CPUID ^ match) & mask
@ == 0, then tail-call the handler at byte offset r3 within the matching
@ entry (r3 set by cache_on/cache_off/cache_clean_flush). The table's last
@ entry has mask 0 so the loop always terminates.
@ In: r3 = slot offset. Clobbers r1, r2, r9, r12.
@ The ldr offsets and the entry-size add immediate were stripped.
@ ---------------------------------------------------------------------------
call_cache_fn: adr r12, proc_types
mrc p15, 0, r9, c0, c0 @ get processor ID
@ #ifdef alternative: CPUs with no CP15 use a configured constant ID.
ldr r9, =CONFIG_PROCESSOR_ID
1: ldr r1, [r12, @ get value (match word)
ldr r2, [r12, @ get mask word
eor r1, r1, r9 @ (real ^ match)
tst r1, r2 @ & mask
ARM( addeq pc, r12, r3 ) @ call cache function
THUMB( addeq r12, r3 )
THUMB( moveq pc, r12 ) @ call cache function
add r12, r12, @ next entry (PROC_ENTRY_SIZE stripped)
b 1b
@ ---------------------------------------------------------------------------
@ proc_types: table of CPU-ID match/mask pairs, each followed by exactly
@ three same-size slots: cache_on, cache_off, cache_clean_flush handlers
@ (entered by call_cache_fn via "addeq pc, r12, r3"). Slots that need no
@ work are a "mov pc, lr" (padded with a nop in Thumb builds so every slot
@ stays the same width — the .if check at the end enforces this).
@ Order matters: earlier, more-specific IDs shadow the later generic
@ architecture-ID matches.
@ ---------------------------------------------------------------------------
.align 2
.type proc_types, @ NOTE(review): symbol type operand (originally %object) lost
proc_types:
.word 0x41560600 @ ARM6/610
.word 0xffffffe0
W(b) __arm6_mmu_cache_off @ works, but slow
W(b) __arm6_mmu_cache_off
mov pc, lr
THUMB( nop )
@ b __arm6_mmu_cache_on @ untested
@ b __arm6_mmu_cache_off
@ b __armv3_mmu_cache_flush
.word 0x00000000 @ old ARM ID
.word 0x0000f000
mov pc, lr
THUMB( nop )
mov pc, lr
THUMB( nop )
mov pc, lr
THUMB( nop )
.word 0x41007000 @ ARM7/710
.word 0xfff8fe00
W(b) __arm7_mmu_cache_off
W(b) __arm7_mmu_cache_off
mov pc, lr
THUMB( nop )
.word 0x41807200 @ ARM720T (writethrough)
.word 0xffffff00
W(b) __armv4_mmu_cache_on
W(b) __armv4_mmu_cache_off
mov pc, lr
THUMB( nop )
.word 0x41007400 @ ARM74x
.word 0xff00ff00
W(b) __armv3_mpu_cache_on
W(b) __armv3_mpu_cache_off
W(b) __armv3_mpu_cache_flush
.word 0x41009400 @ ARM94x
.word 0xff00ff00
W(b) __armv4_mpu_cache_on
W(b) __armv4_mpu_cache_off
W(b) __armv4_mpu_cache_flush
.word 0x41069260 @ ARM926EJ-S (v5TEJ)
.word 0xff0ffff0
W(b) __arm926ejs_mmu_cache_on
W(b) __armv4_mmu_cache_off
W(b) __armv5tej_mmu_cache_flush
.word 0x00007000 @ ARM7 IDs
.word 0x0000f000
mov pc, lr
THUMB( nop )
mov pc, lr
THUMB( nop )
mov pc, lr
THUMB( nop )
@ Everything from here on will be the new ID system.
.word 0x4401a100 @ sa110 / sa1100
.word 0xffffffe0
W(b) __armv4_mmu_cache_on
W(b) __armv4_mmu_cache_off
W(b) __armv4_mmu_cache_flush
.word 0x6901b110 @ sa1110
.word 0xfffffff0
W(b) __armv4_mmu_cache_on
W(b) __armv4_mmu_cache_off
W(b) __armv4_mmu_cache_flush
.word 0x56056900
.word 0xffffff00 @ PXA9xx
W(b) __armv4_mmu_cache_on
W(b) __armv4_mmu_cache_off
W(b) __armv4_mmu_cache_flush
.word 0x56158000 @ PXA168
.word 0xfffff000
W(b) __armv4_mmu_cache_on
W(b) __armv4_mmu_cache_off
W(b) __armv5tej_mmu_cache_flush
.word 0x56050000 @ Feroceon
.word 0xff0f0000
W(b) __armv4_mmu_cache_on
W(b) __armv4_mmu_cache_off
W(b) __armv5tej_mmu_cache_flush
@ NOTE(review): this entry uses .long/b rather than .word/W(b) — in
@ upstream it sits in a separate #ifdef (old Feroceon IDs) block.
.long 0x41009260 @ Old Feroceon
.long 0xff00fff0
b __armv4_mmu_cache_on
b __armv4_mmu_cache_off
b __armv5tej_mmu_cache_flush
.word 0x66015261 @ FA526
.word 0xff01fff1
W(b) __fa526_cache_on
W(b) __armv4_mmu_cache_off
W(b) __fa526_cache_flush
@ These match on the architecture ID
.word 0x00020000 @ ARMv4T
.word 0x000f0000
W(b) __armv4_mmu_cache_on
W(b) __armv4_mmu_cache_off
W(b) __armv4_mmu_cache_flush
.word 0x00050000 @ ARMv5TE
.word 0x000f0000
W(b) __armv4_mmu_cache_on
W(b) __armv4_mmu_cache_off
W(b) __armv4_mmu_cache_flush
.word 0x00060000 @ ARMv5TEJ
.word 0x000f0000
W(b) __armv4_mmu_cache_on
W(b) __armv4_mmu_cache_off
W(b) __armv5tej_mmu_cache_flush
.word 0x0007b000 @ ARMv6
.word 0x000ff000
W(b) __armv4_mmu_cache_on
W(b) __armv4_mmu_cache_off
W(b) __armv6_mmu_cache_flush
.word 0x000f0000 @ new CPU Id
.word 0x000f0000
W(b) __armv7_mmu_cache_on
W(b) __armv7_mmu_cache_off
W(b) __armv7_mmu_cache_flush
@ Terminator: mask 0 matches every CPU, so call_cache_fn always stops here.
.word 0 @ unrecognised type
.word 0
mov pc, lr
THUMB( nop )
mov pc, lr
THUMB( nop )
mov pc, lr
THUMB( nop )
.size proc_types, . - proc_types
@ Build-time guard: every entry must be exactly PROC_ENTRY_SIZE bytes or
@ call_cache_fn's fixed-offset dispatch would land mid-entry.
.if (. - proc_types) % PROC_ENTRY_SIZE != 0
.error "The size of one or more proc_types entries is wrong."
.endif
@ ---------------------------------------------------------------------------
@ cache_off: turn caches (and MMU/MPU) off before entering the kernel.
@ r3 = byte offset of the "off" slot in a proc_types entry (stripped).
@ ---------------------------------------------------------------------------
.align 5
cache_off: mov r3,
b call_cache_fn
@ ARMv4 MPU off: clear the enable bits (bic mask stripped), then drain and
@ flush both caches so nothing dirty survives.
__armv4_mpu_cache_off:
mrc p15, 0, r0, c1, c0
bic r0, r0,
mcr p15, 0, r0, c1, c0 @ turn MPU and cache off
mov r0,
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c7, c6, 0 @ flush D-Cache
mcr p15, 0, r0, c7, c5, 0 @ flush I-Cache
mov pc, lr
@ ARMv3 MPU off: same, with the single v3 whole-cache invalidate.
__armv3_mpu_cache_off:
mrc p15, 0, r0, c1, c0
bic r0, r0,
mcr p15, 0, r0, c1, c0, 0 @ turn MPU and cache off
mov r0,
mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
mov pc, lr
@ ARMv4 MMU off: clear cache+MMU enables, invalidate cache and TLB.
__armv4_mmu_cache_off:
mrc p15, 0, r0, c1, c0
bic r0, r0,
mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
mov r0,
mcr p15, 0, r0, c7, c7 @ invalidate whole cache v4
mcr p15, 0, r0, c8, c7 @ invalidate whole TLB v4
mov pc, lr
@ ARMv7 off: two bic masks (C/M bits and branch predictor, stripped),
@ then a full set/way clean+invalidate via __armv7_mmu_cache_flush and
@ TLB/BTC invalidation with the required DSB/ISB barriers.
__armv7_mmu_cache_off:
mrc p15, 0, r0, c1, c0
bic r0, r0,
bic r0, r0,
mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
mov r12, lr
bl __armv7_mmu_cache_flush
mov r0,
mcr p15, 0, r0, c8, c7, 0 @ invalidate whole TLB
mcr p15, 0, r0, c7, c5, 6 @ invalidate BTC
mcr p15, 0, r0, c7, c10, 4 @ DSB
mcr p15, 0, r0, c7, c5, 4 @ ISB
mov pc, r12
@ ARM6/ARM7 off: load the per-CPU control value into r0 (immediates
@ stripped) and share the v3 tail below.
__arm6_mmu_cache_off:
mov r0,
b __armv3_mmu_cache_off
__arm7_mmu_cache_off:
mov r0,
b __armv3_mmu_cache_off
__armv3_mmu_cache_off:
mcr p15, 0, r0, c1, c0, 0 @ turn MMU and cache off
mov r0,
mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
mcr p15, 0, r0, c5, c0, 0 @ invalidate whole TLB v3
mov pc, lr
@ ---------------------------------------------------------------------------
@ cache_clean_flush: write back and invalidate all caches (needed after the
@ decompressor writes code that will be executed). r3 = "flush" slot
@ offset in proc_types (immediate stripped).
@ ---------------------------------------------------------------------------
.align 5
cache_clean_flush:
mov r3,
b call_cache_fn
@ ARMv4 MPU flush: walk all segments (outer loop, r1) and indices (inner
@ loop, r3) doing clean+invalidate-by-index; r2 selects whether the
@ I-cache invalidate is also issued. Loop-bound immediates stripped.
__armv4_mpu_cache_flush:
mov r2,
mov r3,
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
mov r1, @ segment counter start (stripped)
1: orr r3, r1, @ segment into index word
2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index
subs r3, r3, @ step to next index
bcs 2b @ entries 63 to 0
subs r1, r1,
bcs 1b @ segments 7 to 0
teq r2,
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
@ FA526: single-instruction whole-cache clean+invalidate operations.
__fa526_cache_flush:
mov r1,
mcr p15, 0, r1, c7, c14, 0 @ clean and invalidate D cache
mcr p15, 0, r1, c7, c5, 0 @ flush I cache
mcr p15, 0, r1, c7, c10, 4 @ drain WB
mov pc, lr
@ ARMv6: architected whole-cache maintenance operations.
__armv6_mmu_cache_flush:
mov r1,
mcr p15, 0, r1, c7, c14, 0 @ clean+invalidate D
mcr p15, 0, r1, c7, c5, 0 @ invalidate I+BTB
mcr p15, 0, r1, c7, c15, 0 @ clean+invalidate unified
mcr p15, 0, r1, c7, c10, 4 @ drain WB
mov pc, lr
@ ---------------------------------------------------------------------------
@ ARMv7 flush: if ID_MMFR1 reports the unified clean+invalidate op is
@ usable, do that; otherwise walk the hierarchical cache levels described
@ by CLIDR/CCSIDR, doing clean+invalidate by set/way (the standard ARMv7
@ set/way loop: level -> way -> set). Finishes with I-cache/BTB
@ invalidation and DSB/ISB. Field masks and shift immediates stripped.
@ ---------------------------------------------------------------------------
__armv7_mmu_cache_flush:
mrc p15, 0, r10, c0, c1, 5 @ read ID_MMFR1
tst r10, @ test for the unified-op feature (mask stripped)
mov r10,
beq hierarchical
mcr p15, 0, r10, c7, c14, 0 @ clean+invalidate D
b iflush
hierarchical:
mcr p15, 0, r10, c7, c10, 5 @ DMB
stmfd sp!, {r0-r7, r9-r11} @ the walk burns most low registers
mrc p15, 1, r0, c0, c0, 1 @ read clidr
ands r3, r0, @ extract LoC field (mask stripped)
mov r3, r3, lsr
beq finished @ if loc is 0, then no need to clean
mov r10, @ start at cache level 0 (r10 = level*2)
loop1:
add r2, r10, r10, lsr @ work out 3x current cache level
mov r1, r0, lsr r2 @ extract cache type bits from clidr
and r1, r1,
cmp r1, @ data/unified cache at this level?
blt skip @ skip if no cache, or just i-cache
mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
mcr p15, 0, r10, c7, c5, 4 @ isb to sych the new cssr&csidr
mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
and r2, r1, @ line length field -> set-index shift
add r2, r2,
ldr r4, =0x3ff
ands r4, r4, r1, lsr @ r4 = max way number
clz r5, r4 @ find bit position of way size increment
ldr r7, =0x7fff
ands r7, r7, r1, lsr @ r7 = max set number
loop2:
mov r9, r4 @ create working copy of max way size
loop3:
ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
THUMB( lsl r6, r9, r5 )
THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
THUMB( lsl r6, r7, r2 )
THUMB( orr r11, r11, r6 ) @ factor index number into r11
mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
subs r9, r9, @ decrement way
bge loop3
subs r7, r7, @ decrement set
bge loop2
skip:
add r10, r10, @ next level
cmp r3, r10
bgt loop1
finished:
ldmfd sp!, {r0-r7, r9-r11}
mov r10, @ reselect level 0 in CSSELR
mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
iflush:
mcr p15, 0, r10, c7, c10, 4 @ DSB
mcr p15, 0, r10, c7, c5, 0 @ invalidate I+BTB
mcr p15, 0, r10, c7, c10, 4 @ DSB
mcr p15, 0, r10, c7, c5, 4 @ ISB
mov pc, lr
@ ARMv5TEJ: loop the test-clean-invalidate op (destination r15 makes the
@ op set the flags) until the cache reports clean, then flush I and WB.
__armv5tej_mmu_cache_flush:
1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate D cache
bne 1b
mcr p15, 0, r0, c7, c5, 0 @ flush I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
@ ---------------------------------------------------------------------------
@ ARMv4 flush: no architected clean op, so compute the D-cache size from
@ the cache-type register (falling back to defaults in r2/r11 when the
@ CTR is absent, i.e. reads back as the main ID in r9) and flush by
@ reading a cache-size worth of memory starting at the current PC
@ (software flush), then invalidate I/D and drain the write buffer.
@ Shift amounts, field masks and default sizes were stripped.
@ ---------------------------------------------------------------------------
__armv4_mmu_cache_flush:
mov r2, @ default d-cache size (stripped)
mov r11, @ default cache line length (stripped)
mrc p15, 0, r3, c0, c0, 1 @ read cache type
teq r3, r9 @ cache ID register present?
beq no_cache_id
mov r1, r3, lsr @ extract d-cache size field
and r1, r1,
mov r2,
mov r2, r2, lsl r1 @ base dcache size *2
tst r3, @ cache associativity quirk bit
addne r2, r2, r2, lsr @ +1/2 size if M==1
mov r3, r3, lsr @ extract line-length field
and r3, r3,
mov r11,
mov r11, r11, lsl r3 @ cache line size in bytes
no_cache_id:
mov r1, pc
bic r1, r1, @ align flush window to a line (mask stripped)
add r2, r1, r2 @ r2 = end of the read-through window
1:
ARM( ldr r3, [r1], r11 ) @ s/w flush D cache
THUMB( ldr r3, [r1] ) @ s/w flush D cache
THUMB( add r1, r1, r11 )
teq r1, r2
bne 1b
mcr p15, 0, r1, c7, c5, 0 @ flush I cache
mcr p15, 0, r1, c7, c6, 0 @ flush D cache
mcr p15, 0, r1, c7, c10, 4 @ drain WB
mov pc, lr
@ ARMv3 (MMU or MPU): single whole-cache invalidate suffices.
__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
mov r1,
mcr p15, 0, r1, c7, c0, 0 @ invalidate whole cache v3
mov pc, lr
@ Scratch buffer for phex: up to 8 hex digits + terminator, rounded up.
.align 2
.type phexbuf, @ NOTE(review): symbol type operand (originally %object) lost
phexbuf: .space 12
.size phexbuf, . - phexbuf
@ ---------------------------------------------------------------------------
@ phex: print r0 as an r1-digit hex number. Builds the string backwards in
@ phexbuf (least-significant nibble first, decrementing r1 as the string
@ index), then tail-falls into puts. The nibble mask, shift, '9'/'A'
@ adjustment and ASCII-offset immediates were stripped; the terminator
@ stored via r2 at the top was also an immediate (NUL).
@ ---------------------------------------------------------------------------
@ phex corrupts {r0, r1, r2, r3}
phex: adr r3, phexbuf
mov r2, @ string terminator (immediate stripped)
strb r2, [r3, r1]
1: subs r1, r1, @ next digit position; MI when all digits done
movmi r0, r3 @ done: r0 = buffer, print it
bmi puts
and r2, r0, @ low nibble of the value
mov r0, r0, lsr @ shift value down a nibble
cmp r2, @ digit > 9?
addge r2, r2, @ adjust into the A-F range
add r2, r2, @ add ASCII '0'
strb r2, [r3, r1]
b 1b
@ ---------------------------------------------------------------------------
@ puts: print the NUL-terminated string at r0 via writeb, expanding '\n' to
@ "\r\n" (the teq/moveq immediates for NUL, '\n' and '\r' were stripped).
@ The 3: delay loop between characters gives slow debug channels time to
@ drain (loop count immediate stripped).
@ ---------------------------------------------------------------------------
@ puts corrupts {r0, r1, r2, r3}
puts: loadsp r3, r1 @ r3 = debug-channel base (macro above)
1: ldrb r2, [r0], @ next character, post-increment r0
teq r2, @ NUL terminator?
moveq pc, lr
2: writeb r2, r3
mov r1, @ inter-character delay count
3: subs r1, r1,
bne 3b
teq r2, @ was it '\n'?
moveq r2, @ then also emit '\r'
beq 2b
teq r0, @ NOTE(review): upstream this is "teq r0, #0" — always
bne 1b @ false for a valid pointer, so loop for the next char
mov pc, lr
@ putc: print the single character in r0. Stashes it in r2, points r0 at a
@ NUL (so the shared puts tail at 2b prints exactly one character and then
@ terminates), loads the channel base, and jumps into puts' emit loop.
@ putc corrupts {r0, r1, r2, r3}
putc:
mov r2, r0
mov r0, @ r0 = 0 so puts' terminator check stops after one char
loadsp r3, r1
b 2b
@ ---------------------------------------------------------------------------
@ memdump: hex-dump a region of memory starting at r0, formatted as an
@ address followed by words per line (words per line / total word count
@ are the stripped teq/cmp immediates). r12 = base address, r11 = word
@ index, r10 preserves lr across the putc/phex calls.
@ ---------------------------------------------------------------------------
@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
memdump: mov r12, r0
mov r10, lr
mov r11, @ word index = 0 (immediate stripped)
2: mov r0, r11, lsl @ r0 = byte offset of this line's first word
add r0, r0, r12 @ r0 = absolute address for the line header
mov r1, @ 8 hex digits for the address
bl phex
mov r0, @ separator character (stripped, likely ':')
bl putc
1: mov r0, @ space before each word
bl putc
ldr r0, [r12, r11, lsl @ load word r11 (scale stripped, word size)
mov r1, @ 8 hex digits per word
bl phex
@ extra spacing mid-line (every N words; masks/immediates stripped)
and r0, r11,
teq r0,
moveq r0,
bleq putc
and r0, r11, @ end of line after N words?
add r11, r11,
teq r0,
bne 1b
mov r0, @ newline
bl putc
cmp r11, @ dumped the full region yet?
blt 2b
mov pc, r10
.ltorg
reloc_code_end:
.align
reloc_code_end:
.align
.section ".stack", "aw", %nobits
.L_user_stack: .space 4096
.L_user_stack_end: