Path: blob/master/arch/x86/crypto/salsa20-x86_64-asm_64.S
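# Salsa20 stream cipher, x86_64 scalar implementation (eSTREAM ECRYPT interface).
# Entry points: ECRYPT_encrypt_bytes(x, m, out, bytes), ECRYPT_keysetup(x, k, kbits)
# and ECRYPT_ivsetup(x, iv); x points to the 64-byte Salsa20 state (words 0, 5, 10
# and 15 hold the "expand 32-byte k" / "expand 16-byte k" constants, words 1-4 and
# 11-14 the key, words 6-7 the IV, and words 8-9 the 64-bit block counter).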
# enter ECRYPT_encrypt_bytes
.text
.p2align 5
.globl ECRYPT_encrypt_bytes
ECRYPT_encrypt_bytes:
mov %rsp,%r11
and $31,%r11
add $256,%r11
sub %r11,%rsp
# x = arg1
mov %rdi,%r8
# m = arg2
mov %rsi,%rsi
# out = arg3
mov %rdx,%rdi
# bytes = arg4
mov %rcx,%rdx
# unsigned>? bytes - 0
cmp $0,%rdx
# comment:fp stack unchanged by jump
# goto done if !unsigned>
jbe ._done
# comment:fp stack unchanged by fallthrough
# start:
._start:
# r11_stack = r11
movq %r11,0(%rsp)
# r12_stack = r12
movq %r12,8(%rsp)
# r13_stack = r13
movq %r13,16(%rsp)
# r14_stack = r14
movq %r14,24(%rsp)
# r15_stack = r15
movq %r15,32(%rsp)
# rbx_stack = rbx
movq %rbx,40(%rsp)
# rbp_stack = rbp
movq %rbp,48(%rsp)
# in0 = *(uint64 *) (x + 0)
movq 0(%r8),%rcx
# in2 = *(uint64 *) (x + 8)
movq 8(%r8),%r9
# in4 = *(uint64 *) (x + 16)
movq 16(%r8),%rax
# in6 = *(uint64 *) (x + 24)
movq 24(%r8),%r10
# in8 = *(uint64 *) (x + 32)
movq 32(%r8),%r11
# in10 = *(uint64 *) (x + 40)
movq 40(%r8),%r12
# in12 = *(uint64 *) (x + 48)
movq 48(%r8),%r13
# in14 = *(uint64 *) (x + 56)
movq 56(%r8),%r14
# j0 = in0
movq %rcx,56(%rsp)
# j2 = in2
movq %r9,64(%rsp)
# j4 = in4
movq %rax,72(%rsp)
# j6 = in6
movq %r10,80(%rsp)
# j8 = in8
movq %r11,88(%rsp)
# j10 = in10
movq %r12,96(%rsp)
# j12 = in12
movq %r13,104(%rsp)
# j14 = in14
movq %r14,112(%rsp)
# x_backup = x
movq %r8,120(%rsp)
# bytesatleast1:
._bytesatleast1:
# unsigned<? bytes - 64
cmp $64,%rdx
# comment:fp stack unchanged by jump
# goto nocopy if !unsigned<
jae ._nocopy
# ctarget = out
movq %rdi,128(%rsp)
# out = &tmp
leaq 192(%rsp),%rdi
# i = bytes
mov %rdx,%rcx
# while (i) { *out++ = *m++; --i }
rep movsb
# out = &tmp
leaq 192(%rsp),%rdi
# m = &tmp
leaq 192(%rsp),%rsi
# comment:fp stack unchanged by fallthrough
# nocopy:
._nocopy:
# out_backup = out
movq %rdi,136(%rsp)
# m_backup = m
movq %rsi,144(%rsp)
# bytes_backup = bytes
movq %rdx,152(%rsp)
# x1 = j0
movq 56(%rsp),%rdi
# x0 = x1
mov %rdi,%rdx
# (uint64) x1 >>= 32
shr $32,%rdi
# x3 = j2
movq 64(%rsp),%rsi
# x2 = x3
mov %rsi,%rcx
# (uint64) x3 >>= 32
shr $32,%rsi
# x5 = j4
movq 72(%rsp),%r8
# x4 = x5
mov %r8,%r9
# (uint64) x5 >>= 32
shr $32,%r8
# x5_stack = x5
movq %r8,160(%rsp)
# x7 = j6
movq 80(%rsp),%r8
# x6 = x7
mov %r8,%rax
# (uint64) x7 >>= 32
shr $32,%r8
# x9 = j8
movq 88(%rsp),%r10
# x8 = x9
mov %r10,%r11
# (uint64) x9 >>= 32
shr $32,%r10
# x11 = j10
movq 96(%rsp),%r12
# x10 = x11
mov %r12,%r13
# x10_stack = x10
movq %r13,168(%rsp)
# (uint64) x11 >>= 32
shr $32,%r12
# x13 = j12
movq 104(%rsp),%r13
# x12 = x13
mov %r13,%r14
# (uint64) x13 >>= 32
shr $32,%r13
# x15 = j14
movq 112(%rsp),%r15
# x14 = x15
mov %r15,%rbx
# (uint64) x15 >>= 32
shr $32,%r15
# x15_stack = x15
movq %r15,176(%rsp)
# i = 20
mov $20,%r15
# mainloop:
._mainloop:
# i_backup = i
movq %r15,184(%rsp)
# x5 = x5_stack
movq 160(%rsp),%r15
# a = x12 + x0
lea (%r14,%rdx),%rbp
# (uint32) a <<<= 7
rol $7,%ebp
# x4 ^= a
xor %rbp,%r9
# b = x1 + x5
lea (%rdi,%r15),%rbp
# (uint32) b <<<= 7
rol $7,%ebp
# x9 ^= b
xor %rbp,%r10
# a = x0 + x4
lea (%rdx,%r9),%rbp
# (uint32) a <<<= 9
rol $9,%ebp
# x8 ^= a
xor %rbp,%r11
# b = x5 + x9
lea (%r15,%r10),%rbp
# (uint32) b <<<= 9
rol $9,%ebp
# x13 ^= b
xor %rbp,%r13
# a = x4 + x8
lea (%r9,%r11),%rbp
# (uint32) a <<<= 13
rol $13,%ebp
# x12 ^= a
xor %rbp,%r14
# b = x9 + x13
lea (%r10,%r13),%rbp
# (uint32) b <<<= 13
rol $13,%ebp
# x1 ^= b
xor %rbp,%rdi
# a = x8 + x12
lea (%r11,%r14),%rbp
# (uint32) a <<<= 18
rol $18,%ebp
# x0 ^= a
xor %rbp,%rdx
# b = x13 + x1
lea (%r13,%rdi),%rbp
# (uint32) b <<<= 18
rol $18,%ebp
# x5 ^= b
xor %rbp,%r15
# x10 = x10_stack
movq 168(%rsp),%rbp
# x5_stack = x5
movq %r15,160(%rsp)
# c = x6 + x10
lea (%rax,%rbp),%r15
# (uint32) c <<<= 7
rol $7,%r15d
# x14 ^= c
xor %r15,%rbx
# c = x10 + x14
lea (%rbp,%rbx),%r15
# (uint32) c <<<= 9
rol $9,%r15d
# x2 ^= c
xor %r15,%rcx
# c = x14 + x2
lea (%rbx,%rcx),%r15
# (uint32) c <<<= 13
rol $13,%r15d
# x6 ^= c
xor %r15,%rax
# c = x2 + x6
lea (%rcx,%rax),%r15
# (uint32) c <<<= 18
rol $18,%r15d
# x10 ^= c
xor %r15,%rbp
# x15 = x15_stack
movq 176(%rsp),%r15
# x10_stack = x10
movq %rbp,168(%rsp)
# d = x11 + x15
lea (%r12,%r15),%rbp
# (uint32) d <<<= 7
rol $7,%ebp
# x3 ^= d
xor %rbp,%rsi
# d = x15 + x3
lea (%r15,%rsi),%rbp
# (uint32) d <<<= 9
rol $9,%ebp
# x7 ^= d
xor %rbp,%r8
# d = x3 + x7
lea (%rsi,%r8),%rbp
# (uint32) d <<<= 13
rol $13,%ebp
# x11 ^= d
xor %rbp,%r12
# d = x7 + x11
lea (%r8,%r12),%rbp
# (uint32) d <<<= 18
rol $18,%ebp
# x15 ^= d
xor %rbp,%r15
# x15_stack = x15
movq %r15,176(%rsp)
# x5 = x5_stack
movq 160(%rsp),%r15
# a = x3 + x0
lea (%rsi,%rdx),%rbp
# (uint32) a <<<= 7
rol $7,%ebp
# x1 ^= a
xor %rbp,%rdi
# b = x4 + x5
lea (%r9,%r15),%rbp
# (uint32) b <<<= 7
rol $7,%ebp
# x6 ^= b
xor %rbp,%rax
# a = x0 + x1
lea (%rdx,%rdi),%rbp
# (uint32) a <<<= 9
rol $9,%ebp
# x2 ^= a
xor %rbp,%rcx
# b = x5 + x6
lea (%r15,%rax),%rbp
# (uint32) b <<<= 9
rol $9,%ebp
# x7 ^= b
xor %rbp,%r8
# a = x1 + x2
lea (%rdi,%rcx),%rbp
# (uint32) a <<<= 13
rol $13,%ebp
# x3 ^= a
xor %rbp,%rsi
# b = x6 + x7
lea (%rax,%r8),%rbp
# (uint32) b <<<= 13
rol $13,%ebp
# x4 ^= b
xor %rbp,%r9
# a = x2 + x3
lea (%rcx,%rsi),%rbp
# (uint32) a <<<= 18
rol $18,%ebp
# x0 ^= a
xor %rbp,%rdx
# b = x7 + x4
lea (%r8,%r9),%rbp
# (uint32) b <<<= 18
rol $18,%ebp
# x5 ^= b
xor %rbp,%r15
# x10 = x10_stack
movq 168(%rsp),%rbp
# x5_stack = x5
movq %r15,160(%rsp)
# c = x9 + x10
lea (%r10,%rbp),%r15
# (uint32) c <<<= 7
rol $7,%r15d
# x11 ^= c
xor %r15,%r12
# c = x10 + x11
lea (%rbp,%r12),%r15
# (uint32) c <<<= 9
rol $9,%r15d
# x8 ^= c
xor %r15,%r11
# c = x11 + x8
lea (%r12,%r11),%r15
# (uint32) c <<<= 13
rol $13,%r15d
# x9 ^= c
xor %r15,%r10
# c = x8 + x9
lea (%r11,%r10),%r15
# (uint32) c <<<= 18
rol $18,%r15d
# x10 ^= c
xor %r15,%rbp
# x15 = x15_stack
movq 176(%rsp),%r15
# x10_stack = x10
movq %rbp,168(%rsp)
# d = x14 + x15
lea (%rbx,%r15),%rbp
# (uint32) d <<<= 7
rol $7,%ebp
# x12 ^= d
xor %rbp,%r14
# d = x15 + x12
lea (%r15,%r14),%rbp
# (uint32) d <<<= 9
rol $9,%ebp
# x13 ^= d
xor %rbp,%r13
# d = x12 + x13
lea (%r14,%r13),%rbp
# (uint32) d <<<= 13
rol $13,%ebp
# x14 ^= d
xor %rbp,%rbx
# d = x13 + x14
lea (%r13,%rbx),%rbp
# (uint32) d <<<= 18
rol $18,%ebp
# x15 ^= d
xor %rbp,%r15
# x15_stack = x15
movq %r15,176(%rsp)
# x5 = x5_stack
movq 160(%rsp),%r15
# a = x12 + x0
lea (%r14,%rdx),%rbp
# (uint32) a <<<= 7
rol $7,%ebp
# x4 ^= a
xor %rbp,%r9
# b = x1 + x5
lea (%rdi,%r15),%rbp
# (uint32) b <<<= 7
rol $7,%ebp
# x9 ^= b
xor %rbp,%r10
# a = x0 + x4
lea (%rdx,%r9),%rbp
# (uint32) a <<<= 9
rol $9,%ebp
# x8 ^= a
xor %rbp,%r11
# b = x5 + x9
lea (%r15,%r10),%rbp
# (uint32) b <<<= 9
rol $9,%ebp
# x13 ^= b
xor %rbp,%r13
# a = x4 + x8
lea (%r9,%r11),%rbp
# (uint32) a <<<= 13
rol $13,%ebp
# x12 ^= a
xor %rbp,%r14
# b = x9 + x13
lea (%r10,%r13),%rbp
# (uint32) b <<<= 13
rol $13,%ebp
# x1 ^= b
xor %rbp,%rdi
# a = x8 + x12
lea (%r11,%r14),%rbp
# (uint32) a <<<= 18
rol $18,%ebp
# x0 ^= a
xor %rbp,%rdx
# b = x13 + x1
lea (%r13,%rdi),%rbp
# (uint32) b <<<= 18
rol $18,%ebp
# x5 ^= b
xor %rbp,%r15
# x10 = x10_stack
movq 168(%rsp),%rbp
# x5_stack = x5
movq %r15,160(%rsp)
# c = x6 + x10
lea (%rax,%rbp),%r15
# (uint32) c <<<= 7
rol $7,%r15d
# x14 ^= c
xor %r15,%rbx
# c = x10 + x14
lea (%rbp,%rbx),%r15
# (uint32) c <<<= 9
rol $9,%r15d
# x2 ^= c
xor %r15,%rcx
# c = x14 + x2
lea (%rbx,%rcx),%r15
# (uint32) c <<<= 13
rol $13,%r15d
# x6 ^= c
xor %r15,%rax
# c = x2 + x6
lea (%rcx,%rax),%r15
# (uint32) c <<<= 18
rol $18,%r15d
# x10 ^= c
xor %r15,%rbp
# x15 = x15_stack
movq 176(%rsp),%r15
# x10_stack = x10
movq %rbp,168(%rsp)
# d = x11 + x15
lea (%r12,%r15),%rbp
# (uint32) d <<<= 7
rol $7,%ebp
# x3 ^= d
xor %rbp,%rsi
# d = x15 + x3
lea (%r15,%rsi),%rbp
# (uint32) d <<<= 9
rol $9,%ebp
# x7 ^= d
xor %rbp,%r8
# d = x3 + x7
lea (%rsi,%r8),%rbp
# (uint32) d <<<= 13
rol $13,%ebp
# x11 ^= d
xor %rbp,%r12
# d = x7 + x11
lea (%r8,%r12),%rbp
# (uint32) d <<<= 18
rol $18,%ebp
# x15 ^= d
xor %rbp,%r15
# x15_stack = x15
movq %r15,176(%rsp)
# x5 = x5_stack
movq 160(%rsp),%r15
# a = x3 + x0
lea (%rsi,%rdx),%rbp
# (uint32) a <<<= 7
rol $7,%ebp
# x1 ^= a
xor %rbp,%rdi
# b = x4 + x5
lea (%r9,%r15),%rbp
# (uint32) b <<<= 7
rol $7,%ebp
# x6 ^= b
xor %rbp,%rax
# a = x0 + x1
lea (%rdx,%rdi),%rbp
# (uint32) a <<<= 9
rol $9,%ebp
# x2 ^= a
xor %rbp,%rcx
# b = x5 + x6
lea (%r15,%rax),%rbp
# (uint32) b <<<= 9
rol $9,%ebp
# x7 ^= b
xor %rbp,%r8
# a = x1 + x2
lea (%rdi,%rcx),%rbp
# (uint32) a <<<= 13
rol $13,%ebp
# x3 ^= a
xor %rbp,%rsi
# b = x6 + x7
lea (%rax,%r8),%rbp
# (uint32) b <<<= 13
rol $13,%ebp
# x4 ^= b
xor %rbp,%r9
# a = x2 + x3
lea (%rcx,%rsi),%rbp
# (uint32) a <<<= 18
rol $18,%ebp
# x0 ^= a
xor %rbp,%rdx
# b = x7 + x4
lea (%r8,%r9),%rbp
# (uint32) b <<<= 18
rol $18,%ebp
# x5 ^= b
xor %rbp,%r15
# x10 = x10_stack
movq 168(%rsp),%rbp
# x5_stack = x5
movq %r15,160(%rsp)
# c = x9 + x10
lea (%r10,%rbp),%r15
# (uint32) c <<<= 7
rol $7,%r15d
# x11 ^= c
xor %r15,%r12
# c = x10 + x11
lea (%rbp,%r12),%r15
# (uint32) c <<<= 9
rol $9,%r15d
# x8 ^= c
xor %r15,%r11
# c = x11 + x8
lea (%r12,%r11),%r15
# (uint32) c <<<= 13
rol $13,%r15d
# x9 ^= c
xor %r15,%r10
# c = x8 + x9
lea (%r11,%r10),%r15
# (uint32) c <<<= 18
rol $18,%r15d
# x10 ^= c
xor %r15,%rbp
# x15 = x15_stack
movq 176(%rsp),%r15
# x10_stack = x10
movq %rbp,168(%rsp)
# d = x14 + x15
lea (%rbx,%r15),%rbp
# (uint32) d <<<= 7
rol $7,%ebp
# x12 ^= d
xor %rbp,%r14
# d = x15 + x12
lea (%r15,%r14),%rbp
# (uint32) d <<<= 9
rol $9,%ebp
# x13 ^= d
xor %rbp,%r13
# d = x12 + x13
lea (%r14,%r13),%rbp
# (uint32) d <<<= 13
rol $13,%ebp
# x14 ^= d
xor %rbp,%rbx
# d = x13 + x14
lea (%r13,%rbx),%rbp
# (uint32) d <<<= 18
rol $18,%ebp
# x15 ^= d
xor %rbp,%r15
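# Two Salsa20 double-rounds (four rounds) are unrolled above, so the round
# counter below is decremented by 4 per pass; i starts at 20 for Salsa20/20.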
# x15_stack = x15
movq %r15,176(%rsp)
# i = i_backup
movq 184(%rsp),%r15
# unsigned>? i -= 4
sub $4,%r15
# comment:fp stack unchanged by jump
# goto mainloop if unsigned>
ja ._mainloop
# (uint32) x2 += j2
addl 64(%rsp),%ecx
# x3 <<= 32
shl $32,%rsi
# x3 += j2
addq 64(%rsp),%rsi
# (uint64) x3 >>= 32
shr $32,%rsi
# x3 <<= 32
shl $32,%rsi
# x2 += x3
add %rsi,%rcx
# (uint32) x6 += j6
addl 80(%rsp),%eax
# x7 <<= 32
shl $32,%r8
# x7 += j6
addq 80(%rsp),%r8
# (uint64) x7 >>= 32
shr $32,%r8
# x7 <<= 32
shl $32,%r8
# x6 += x7
add %r8,%rax
# (uint32) x8 += j8
addl 88(%rsp),%r11d
# x9 <<= 32
shl $32,%r10
# x9 += j8
addq 88(%rsp),%r10
# (uint64) x9 >>= 32
shr $32,%r10
# x9 <<= 32
shl $32,%r10
# x8 += x9
add %r10,%r11
# (uint32) x12 += j12
addl 104(%rsp),%r14d
# x13 <<= 32
shl $32,%r13
# x13 += j12
addq 104(%rsp),%r13
# (uint64) x13 >>= 32
shr $32,%r13
# x13 <<= 32
shl $32,%r13
# x12 += x13
add %r13,%r14
# (uint32) x0 += j0
addl 56(%rsp),%edx
# x1 <<= 32
shl $32,%rdi
# x1 += j0
addq 56(%rsp),%rdi
# (uint64) x1 >>= 32
shr $32,%rdi
# x1 <<= 32
shl $32,%rdi
# x0 += x1
add %rdi,%rdx
# x5 = x5_stack
movq 160(%rsp),%rdi
# (uint32) x4 += j4
addl 72(%rsp),%r9d
# x5 <<= 32
shl $32,%rdi
# x5 += j4
addq 72(%rsp),%rdi
# (uint64) x5 >>= 32
shr $32,%rdi
# x5 <<= 32
shl $32,%rdi
# x4 += x5
add %rdi,%r9
# x10 = x10_stack
movq 168(%rsp),%r8
# (uint32) x10 += j10
addl 96(%rsp),%r8d
# x11 <<= 32
shl $32,%r12
# x11 += j10
addq 96(%rsp),%r12
# (uint64) x11 >>= 32
shr $32,%r12
# x11 <<= 32
shl $32,%r12
# x10 += x11
add %r12,%r8
# x15 = x15_stack
movq 176(%rsp),%rdi
# (uint32) x14 += j14
addl 112(%rsp),%ebx
# x15 <<= 32
shl $32,%rdi
# x15 += j14
addq 112(%rsp),%rdi
# (uint64) x15 >>= 32
shr $32,%rdi
# x15 <<= 32
shl $32,%rdi
# x14 += x15
add %rdi,%rbx
# out = out_backup
movq 136(%rsp),%rdi
# m = m_backup
movq 144(%rsp),%rsi
# x0 ^= *(uint64 *) (m + 0)
xorq 0(%rsi),%rdx
# *(uint64 *) (out + 0) = x0
movq %rdx,0(%rdi)
# x2 ^= *(uint64 *) (m + 8)
xorq 8(%rsi),%rcx
# *(uint64 *) (out + 8) = x2
movq %rcx,8(%rdi)
# x4 ^= *(uint64 *) (m + 16)
xorq 16(%rsi),%r9
# *(uint64 *) (out + 16) = x4
movq %r9,16(%rdi)
# x6 ^= *(uint64 *) (m + 24)
xorq 24(%rsi),%rax
# *(uint64 *) (out + 24) = x6
movq %rax,24(%rdi)
# x8 ^= *(uint64 *) (m + 32)
xorq 32(%rsi),%r11
# *(uint64 *) (out + 32) = x8
movq %r11,32(%rdi)
# x10 ^= *(uint64 *) (m + 40)
xorq 40(%rsi),%r8
# *(uint64 *) (out + 40) = x10
movq %r8,40(%rdi)
# x12 ^= *(uint64 *) (m + 48)
xorq 48(%rsi),%r14
# *(uint64 *) (out + 48) = x12
movq %r14,48(%rdi)
# x14 ^= *(uint64 *) (m + 56)
xorq 56(%rsi),%rbx
# *(uint64 *) (out + 56) = x14
movq %rbx,56(%rdi)
# bytes = bytes_backup
movq 152(%rsp),%rdx
# in8 = j8
movq 88(%rsp),%rcx
# in8 += 1
add $1,%rcx
# j8 = in8
movq %rcx,88(%rsp)
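# j8 holds state words 8 and 9, the 64-bit block counter; it is advanced by
# one for every 64-byte keystream block generated above.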
# unsigned>? unsigned<? bytes - 64
cmp $64,%rdx
# comment:fp stack unchanged by jump
# goto bytesatleast65 if unsigned>
ja ._bytesatleast65
# comment:fp stack unchanged by jump
# goto bytesatleast64 if !unsigned<
jae ._bytesatleast64
# m = out
mov %rdi,%rsi
# out = ctarget
movq 128(%rsp),%rdi
# i = bytes
mov %rdx,%rcx
# while (i) { *out++ = *m++; --i }
rep movsb
# comment:fp stack unchanged by fallthrough
# bytesatleast64:
._bytesatleast64:
# x = x_backup
movq 120(%rsp),%rdi
# in8 = j8
movq 88(%rsp),%rsi
# *(uint64 *) (x + 32) = in8
movq %rsi,32(%rdi)
# r11 = r11_stack
movq 0(%rsp),%r11
# r12 = r12_stack
movq 8(%rsp),%r12
# r13 = r13_stack
movq 16(%rsp),%r13
# r14 = r14_stack
movq 24(%rsp),%r14
# r15 = r15_stack
movq 32(%rsp),%r15
# rbx = rbx_stack
movq 40(%rsp),%rbx
# rbp = rbp_stack
movq 48(%rsp),%rbp
# comment:fp stack unchanged by fallthrough
# done:
._done:
# leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret
# bytesatleast65:
._bytesatleast65:
# bytes -= 64
sub $64,%rdx
# out += 64
add $64,%rdi
# m += 64
add $64,%rsi
# comment:fp stack unchanged by jump
# goto bytesatleast1
jmp ._bytesatleast1
# enter ECRYPT_keysetup
.text
.p2align 5
.globl ECRYPT_keysetup
ECRYPT_keysetup:
mov %rsp,%r11
and $31,%r11
add $256,%r11
sub %r11,%rsp
# k = arg2
mov %rsi,%rsi
# kbits = arg3
mov %rdx,%rdx
# x = arg1
mov %rdi,%rdi
# in0 = *(uint64 *) (k + 0)
movq 0(%rsi),%r8
# in2 = *(uint64 *) (k + 8)
movq 8(%rsi),%r9
# *(uint64 *) (x + 4) = in0
movq %r8,4(%rdi)
# *(uint64 *) (x + 12) = in2
movq %r9,12(%rdi)
# unsigned<? kbits - 256
cmp $256,%rdx
# comment:fp stack unchanged by jump
# goto kbits128 if unsigned<
jb ._kbits128
# kbits256:
._kbits256:
# in10 = *(uint64 *) (k + 16)
movq 16(%rsi),%rdx
# in12 = *(uint64 *) (k + 24)
movq 24(%rsi),%rsi
# *(uint64 *) (x + 44) = in10
movq %rdx,44(%rdi)
# *(uint64 *) (x + 52) = in12
movq %rsi,52(%rdi)
# in0 = 1634760805
mov $1634760805,%rsi
# in4 = 857760878
mov $857760878,%rdx
# in10 = 2036477234
mov $2036477234,%rcx
# in14 = 1797285236
mov $1797285236,%r8
# *(uint32 *) (x + 0) = in0
movl %esi,0(%rdi)
# *(uint32 *) (x + 20) = in4
movl %edx,20(%rdi)
# *(uint32 *) (x + 40) = in10
movl %ecx,40(%rdi)
# *(uint32 *) (x + 60) = in14
movl %r8d,60(%rdi)
# comment:fp stack unchanged by jump
# goto keysetupdone
jmp ._keysetupdone
# kbits128:
._kbits128:
# in10 = *(uint64 *) (k + 0)
movq 0(%rsi),%rdx
# in12 = *(uint64 *) (k + 8)
movq 8(%rsi),%rsi
# *(uint64 *) (x + 44) = in10
movq %rdx,44(%rdi)
# *(uint64 *) (x + 52) = in12
movq %rsi,52(%rdi)
# in0 = 1634760805
mov $1634760805,%rsi
# in4 = 824206446
mov $824206446,%rdx
# in10 = 2036477238
mov $2036477238,%rcx
# in14 = 1797285236
mov $1797285236,%r8
# *(uint32 *) (x + 0) = in0
movl %esi,0(%rdi)
# *(uint32 *) (x + 20) = in4
movl %edx,20(%rdi)
# *(uint32 *) (x + 40) = in10
movl %ecx,40(%rdi)
# *(uint32 *) (x + 60) = in14
movl %r8d,60(%rdi)
# keysetupdone:
._keysetupdone:
# leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret
# enter ECRYPT_ivsetup
.text
.p2align 5
.globl ECRYPT_ivsetup
ECRYPT_ivsetup:
mov %rsp,%r11
and $31,%r11
add $256,%r11
sub %r11,%rsp
# iv = arg2
mov %rsi,%rsi
# x = arg1
mov %rdi,%rdi
# in6 = *(uint64 *) (iv + 0)
movq 0(%rsi),%rsi
# in8 = 0
mov $0,%r8
# *(uint64 *) (x + 24) = in6
movq %rsi,24(%rdi)
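# Loading a new IV also resets the 64-bit block counter (state words 8-9) to zero.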
# *(uint64 *) (x + 32) = in8
movq %r8,32(%rdi)
# leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret