/*
 * arch/alpha/lib/ev6-memcpy.S
 * 21264 version by Rick Gorton <[email protected]>
 *
 * Reasonably optimized memcpy() routine for the Alpha 21264
 *
 *	- memory accessed as aligned quadwords only
 *
 * Much of the information about 21264 scheduling/coding comes from:
 *	Compiler Writer's Guide for the Alpha 21264
 *	abbreviated as 'CWG' in other comments here
 *	ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
 * Scheduling notation:
 *	E	- either cluster
 *	U	- upper subcluster; U0 - subcluster U0; U1 - subcluster U1
 *	L	- lower subcluster; L0 - subcluster L0; L1 - subcluster L1
 *
 * Temp usage notes:
 *	$1,$2,	- scratch
 */

	.set noreorder
	.set noat

/*
 * void *memcpy(void *dest, const void *src, size_t n)
 *
 * In  (Alpha calling standard):
 *	$16 = dest, $17 = src, $18 = byte count
 * Out:
 *	$0  = original dest (copied from $16 on entry)
 * Return via $26 (ra).  Scratch: $1-$7; $16-$18 are consumed as the
 * copy advances.  Leaf routine - no stack frame (.frame $30,0,$26,0).
 *
 * Strategy: if src and dest share alignment mod 8, byte-copy to an
 * 0mod8 boundary, then move quadwords - with a 64-byte unrolled loop
 * (wh64-hinted) for counts > 127.  Otherwise fall through to
 * $misaligned, which aligns dest only and merges unaligned source
 * quads with ldq_u/extql/extqh.
 */
	.align 4
	.globl memcpy
	.ent memcpy
memcpy:
	.frame $30,0,$26,0
	.prologue 0

	mov	$16, $0			# E : copy dest to return
	ble	$18, $nomoredata	# U : done with the copy?
	xor	$16, $17, $1		# E : are source and dest alignments the same?
	and	$1, 7, $1		# E : are they the same mod 8?

	bne	$1, $misaligned		# U : Nope - gotta do this the slow way
	/* source and dest are same mod 8 address */
	and	$16, 7, $1		# E : Are both 0mod8?
	beq	$1, $both_0mod8		# U : Yes
	nop				# E :

	/*
	 * source and dest are same misalignment.  move a byte at a time
	 * until a 0mod8 alignment for both is reached.
	 * At least one byte more to move
	 */
$head_align:
	ldbu	$1, 0($17)		# L : grab a byte
	subq	$18, 1, $18		# E : count--
	addq	$17, 1, $17		# E : src++
	stb	$1, 0($16)		# L :
	addq	$16, 1, $16		# E : dest++
	and	$16, 7, $1		# E : Are we at 0mod8 yet?
	ble	$18, $nomoredata	# U : done with the copy?
	bne	$1, $head_align		# U :

$both_0mod8:
	cmple	$18, 127, $1		# E : Can we unroll the loop?
	bne	$1, $no_unroll		# U :
	and	$16, 63, $1		# E : get mod64 alignment
	beq	$1, $do_unroll		# U : no single quads to fiddle

	/* Copy single quads until dest is 64-byte aligned for wh64. */
$single_head_quad:
	ldq	$1, 0($17)		# L : get 8 bytes
	subq	$18, 8, $18		# E : count -= 8
	addq	$17, 8, $17		# E : src += 8
	nop				# E :

	stq	$1, 0($16)		# L : store
	addq	$16, 8, $16		# E : dest += 8
	and	$16, 63, $1		# E : get mod64 alignment
	bne	$1, $single_head_quad	# U : still not fully aligned

$do_unroll:
	addq	$16, 64, $7		# E : Initial (+1 trip) wh64 address
	cmple	$18, 127, $1		# E : Can we go through the unrolled loop?
	bne	$1, $tail_quads		# U : Nope
	nop				# E :

	/*
	 * 64 bytes per iteration: two 32-byte load/store groups.  $7 runs
	 * one cache line ahead as the wh64 target; near loop exit it is
	 * redirected (cmovlt below) so the hint never touches bytes past
	 * the region being written.
	 */
$unroll_body:
	wh64	($7)			# L1 : memory subsystem hint: 64 bytes at
					# ($7) are about to be over-written
	ldq	$6, 0($17)		# L0 : bytes 0..7
	nop				# E :
	nop				# E :

	ldq	$4, 8($17)		# L : bytes 8..15
	ldq	$5, 16($17)		# L : bytes 16..23
	addq	$7, 64, $7		# E : Update next wh64 address
	nop				# E :

	ldq	$3, 24($17)		# L : bytes 24..31
	addq	$16, 64, $1		# E : fallback value for wh64
	nop				# E :
	nop				# E :

	addq	$17, 32, $17		# E : src += 32 bytes
	stq	$6, 0($16)		# L : bytes 0..7
	nop				# E :
	nop				# E :

	stq	$4, 8($16)		# L : bytes 8..15
	stq	$5, 16($16)		# L : bytes 16..23
	subq	$18, 192, $2		# E : At least two more trips to go?
	nop				# E :

	stq	$3, 24($16)		# L : bytes 24..31
	addq	$16, 32, $16		# E : dest += 32 bytes
	nop				# E :
	nop				# E :

	ldq	$6, 0($17)		# L : bytes 0..7
	ldq	$4, 8($17)		# L : bytes 8..15
	cmovlt	$2, $1, $7		# E : Latency 2, extra map slot - Use
					# fallback wh64 address if < 2 more trips
	nop				# E :

	ldq	$5, 16($17)		# L : bytes 16..23
	ldq	$3, 24($17)		# L : bytes 24..31
	addq	$16, 32, $16		# E : dest += 32
	subq	$18, 64, $18		# E : count -= 64

	addq	$17, 32, $17		# E : src += 32
	stq	$6, -32($16)		# L : bytes 0..7
	stq	$4, -24($16)		# L : bytes 8..15
	cmple	$18, 63, $1		# E : At least one more trip?

	stq	$5, -16($16)		# L : bytes 16..23
	stq	$3, -8($16)		# L : bytes 24..31
	nop				# E :
	beq	$1, $unroll_body

$tail_quads:
$no_unroll:
	.align 4
	subq	$18, 8, $18		# E : At least a quad left?
	blt	$18, $less_than_8	# U : Nope
	nop				# E :
	nop				# E :

$move_a_quad:
	ldq	$1, 0($17)		# L : fetch 8
	subq	$18, 8, $18		# E : count -= 8
	addq	$17, 8, $17		# E : src += 8
	nop				# E :

	stq	$1, 0($16)		# L : store 8
	addq	$16, 8, $16		# E : dest += 8
	bge	$18, $move_a_quad	# U :
	nop				# E :

$less_than_8:
	.align 4
	addq	$18, 8, $18		# E : add back for trailing bytes
	ble	$18, $nomoredata	# U : All-done
	nop				# E :
	nop				# E :

	/* Trailing bytes */
$tail_bytes:
	subq	$18, 1, $18		# E : count--
	ldbu	$1, 0($17)		# L : fetch a byte
	addq	$17, 1, $17		# E : src++
	nop				# E :

	stb	$1, 0($16)		# L : store a byte
	addq	$16, 1, $16		# E : dest++
	bgt	$18, $tail_bytes	# U : more to be done?
	nop				# E :

	/* branching to exit takes 3 extra cycles, so replicate exit here */
	ret	$31, ($26), 1		# L0 :
	nop				# E :
	nop				# E :
	nop				# E :

	/*
	 * src and dest differ mod 8.  $0 still holds the original dest for
	 * the return value, so the copy advances a separate dest pointer $4.
	 */
$misaligned:
	mov	$0, $4			# E : dest temp
	and	$0, 7, $1		# E : dest alignment mod8
	beq	$1, $dest_0mod8		# U : life doesnt totally suck
	nop

$aligndest:
	ble	$18, $nomoredata	# U :
	ldbu	$1, 0($17)		# L : fetch a byte
	subq	$18, 1, $18		# E : count--
	addq	$17, 1, $17		# E : src++

	stb	$1, 0($4)		# L : store it
	addq	$4, 1, $4		# E : dest++
	and	$4, 7, $1		# E : dest 0mod8 yet?
	bne	$1, $aligndest		# U : go until we are aligned.

	/* Source has unknown alignment, but dest is known to be 0mod8 */
$dest_0mod8:
	subq	$18, 8, $18		# E : At least a quad left?
	blt	$18, $misalign_tail	# U : Nope
	ldq_u	$3, 0($17)		# L : seed (rotating load) of 8 bytes
	nop				# E :

	/*
	 * Rotating-merge loop: each trip loads the next unaligned quad
	 * ($16 reused as scratch - dest lives in $4 here), shifts the
	 * previous ($3) and current quads by the src misalignment, and
	 * stores the merged aligned quad.
	 */
$mis_quad:
	ldq_u	$16, 8($17)		# L : Fetch next 8
	extql	$3, $17, $3		# U : masking
	extqh	$16, $17, $1		# U : masking
	bis	$3, $1, $1		# E : merged bytes to store

	subq	$18, 8, $18		# E : count -= 8
	addq	$17, 8, $17		# E : src += 8
	stq	$1, 0($4)		# L : store 8 (aligned)
	mov	$16, $3			# E : "rotate" source data

	addq	$4, 8, $4		# E : dest += 8
	bge	$18, $mis_quad		# U : More quads to move
	nop
	nop

$misalign_tail:
	addq	$18, 8, $18		# E : account for tail stuff
	ble	$18, $nomoredata	# U :
	nop
	nop

$misalign_byte:
	ldbu	$1, 0($17)		# L : fetch 1
	subq	$18, 1, $18		# E : count--
	addq	$17, 1, $17		# E : src++
	nop				# E :

	stb	$1, 0($4)		# L : store
	addq	$4, 1, $4		# E : dest++
	bgt	$18, $misalign_byte	# U : more to go?
	nop


$nomoredata:
	ret	$31, ($26), 1		# L0 :
	nop				# E :
	nop				# E :
	nop				# E :

	.end memcpy

	/* For backwards module compatibility.  */
	__memcpy = memcpy
	.globl __memcpy