.weak strncmp
.set strncmp, __strncmp
.text
ENTRY(__strncmp)
bic x8, x0, #0xf // x8 is x0 rounded down to the 16 byte boundary
and x9, x0, #0xf // x9 is the offset into that chunk
bic x10, x1, #0xf // likewise for x1
and x11, x1, #0xf
subs x2, x2, #1 // x2 is now the index of the last byte
b.lo .Lempty
mov x13, #-1 // save constants for later
mov x16, #0xf // nibble used to mark the end of the buffer
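// Before touching the 16 byte heads, check whether either load would
// cross a page boundary. If it might, the heads are instead loaded from
// their enclosing aligned chunks and, when the string (or the buffer)
// ends inside that chunk, shifted into place with TBL via shift_data so
// that no byte beyond the string's page is ever read.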
add x3, x0, #16 // end of head
add x4, x1, #16
eor x3, x3, x0
eor x4, x4, x1 // bits that changed
orr x3, x3, x4 // in either str1 or str2
cmp x2, #16
b.lo .Llt16
tbz w3, #12, .Lbegin // neither 16 byte head crosses a 4 KiB page?
ldr q0, [x8] // load aligned head
ldr q1, [x10]
lsl x14, x9, #2
lsl x15, x11, #2
lsl x3, x13, x14 // string head
lsl x4, x13, x15
cmeq v5.16b, v0.16b, #0 // NUL bytes in the aligned heads?
cmeq v6.16b, v1.16b, #0
shrn v5.8b, v5.8h, #4 // reduce to one nibble per byte
shrn v6.8b, v6.8h, #4
fmov x5, d5
fmov x6, d6
adrp x14, shift_data
add x14, x14, :lo12:shift_data
tst x5, x3
b.eq 0f
ldr q4, [x14, x9] // load permutation table
tbl v0.16b, {v0.16b}, v4.16b
b 1f
.p2align 4
0:
ldr q0, [x0] // load true head
1:
tst x6, x4
b.eq 0f
ldr q4, [x14, x11]
tbl v4.16b, {v1.16b}, v4.16b
b 1f
.p2align 4
.Lbegin:
ldr q0, [x0] // load true heads
0:
ldr q4, [x1]
1:
cmeq v2.16b, v0.16b, #0 // NUL byte present?
cmeq v4.16b, v0.16b, v4.16b // which bytes match?
orn v2.16b, v2.16b, v4.16b // mismatch or NUL byte?
shrn v2.8b, v2.8h, #4
fmov x5, d2
cbnz x5, .Lhead_mismatch
ldr q2, [x8, #16] // load second chunks
ldr q3, [x10, #16]
add x2, x2, x11
sub x2, x2, #16 // limit is now relative to b's second chunk
subs x9, x9, x11 // is a&0xf >= b&0xf
b.lo .Lswapped // if not swap operands
b .Lnormal
.p2align 4
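// At most 16 bytes to compare: the buffer ends within the heads. Same
// page crossing logic as above, but with an extra match forced in at the
// position of the last byte of the buffer.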
.Llt16:
tbz w3, #12, 2f // neither 16 byte head crosses a 4 KiB page?
ldr q0, [x8] // load aligned head
ldr q1, [x10]
lsl x14, x9, #2
lsl x15, x11, #2
lsl x3, x13, x14 // string head
lsl x4, x13, x15
add x14, x2, x9 // position of the last byte within the aligned chunks
add x15, x2, x11
lsl x14, x14, #2
lsl x15, x15, #2
lsl x14, x16, x14
lsl x15, x16, x15
cmeq v5.16b, v0.16b, #0
cmeq v6.16b, v1.16b, #0
shrn v5.8b, v5.8h, #4
shrn v6.8b, v6.8h, #4
fmov x5, d5
fmov x6, d6
orr x5, x5, x14 // insert match at limit
orr x6, x6, x15
adrp x14, shift_data
add x14, x14, :lo12:shift_data
tst x5, x3
b.eq 0f
ldr q4, [x14, x9] // load permutation table
tbl v0.16b, {v0.16b}, v4.16b
b 1f
.p2align 4
0:
ldr q0, [x0] // load true head
1:
tst x6, x4
b.eq 0f
ldr q4, [x14, x11]
tbl v4.16b, {v1.16b}, v4.16b
b 1f
.p2align 4
2:
ldr q0, [x0] // load true heads
0:
ldr q4, [x1]
1:
cmeq v2.16b, v0.16b, #0 // NUL byte present?
cmeq v4.16b, v0.16b, v4.16b // which bytes match?
bic v2.16b, v4.16b, v2.16b // match and not NUL byte
shrn v2.8b, v2.8h, #4
fmov x5, d2
lsl x4, x2, #2
lsl x4, x13, x4
orn x5, x4, x5 // mismatch or NUL byte?
.Lhead_mismatch:
rbit x3, x5
clz x3, x3 // index of mismatch
lsr x3, x3, #2 // convert nibble position to byte index
ldrb w4, [x0, x3]
ldrb w5, [x1, x3]
sub w0, w4, w5
ret
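// The heads are equal and contain neither a NUL byte nor the end of the
// buffer. Walk the strings 16 aligned bytes at a time: compare aligned
// chunks of a against unaligned loads of the corresponding bytes of b and
// look for NUL bytes in the aligned chunks of b. .Lnormal handles
// a&0xf >= b&0xf; .Lswapped is the same code with the strings exchanged.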
.p2align 4
.Lnormal:
sub x12, x10, x9
ldr q0, [x12, #16] // load the part of b matching a's second chunk
sub x10, x10, x8
sub x11, x10, x9
cmeq v1.16b, v3.16b, #0 // NUL bytes in b's chunk?
cmeq v0.16b, v0.16b, v2.16b // Mismatch between chunks?
shrn v1.8b, v1.8h, #4
shrn v0.8b, v0.8h, #4
fmov x6, d1
fmov x5, d0
add x8, x8, #32 // advance past the head and the current chunk
lsl x4, x2, #2
lsl x4, x13, x4
orr x3, x6, x4 // introduce a null byte match
cmp x2, #16 // does the buffer end within this chunk?
csel x6, x3, x6, lo
cbnz x6, .Lnulfound2 // NUL or end of buffer found?
mvn x5, x5
cbnz x5, .Lmismatch2
sub x2, x2, #16
cmp x2, #32 // at least one full iteration left?
b.lo .Ltail
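// Main loop: two 16 byte chunks per iteration. The buffer limit is only
// checked in the loop condition, since at least 32 bytes remain while the
// loop is taken.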
.p2align 4
0:
ldr q0, [x8, x11]
ldr q1, [x8, x10]
ldr q2, [x8]
cmeq v1.16b, v1.16b, #0 // NUL bytes present?
cmeq v0.16b, v0.16b, v2.16b // do the chunks match?
shrn v1.8b, v1.8h, #4
shrn v0.8b, v0.8h, #4
fmov x6, d1
fmov x5, d0
cbnz x6, .Lnulfound
mvn x5, x5 // any mismatches?
cbnz x5, .Lmismatch
add x8, x8, #16
ldr q0, [x8, x11]
ldr q1, [x8, x10]
ldr q2, [x8]
add x8, x8, #16
cmeq v1.16b, v1.16b, #0
cmeq v0.16b, v0.16b, v2.16b
shrn v1.8b, v1.8h, #4
shrn v0.8b, v0.8h, #4
fmov x6, d1
fmov x5, d0
cbnz x6, .Lnulfound2
mvn x5, x5
cbnz x5, .Lmismatch2
sub x2, x2, #32
cmp x2, #32 // enough left for another iteration?
b.hs 0b // if yes, loop; if not, process the tail
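// Tail: at most two chunks left. A NUL match is forced in at the offset
// of the last byte of the buffer so the comparison stops there.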
.Ltail:
ldr q0, [x8, x11]
ldr q1, [x8, x10]
ldr q2, [x8]
cmeq v1.16b, v1.16b, #0 // NUL bytes present?
cmeq v0.16b, v0.16b, v2.16b // do the chunks match?
shrn v1.8b, v1.8h, #4
shrn v0.8b, v0.8h, #4
fmov x6, d1
fmov x5, d0
lsl x4, x2, #2
lsl x4, x13, x4
orr x3, x6, x4 // introduce a null byte match
cmp x2, #16
csel x6, x3, x6, lo
cbnz x6, .Lnulfound // NUL or end of string found
mvn x5, x5
cbnz x5, .Lmismatch
add x8, x8, #16
ldr q0, [x8, x11]
ldr q1, [x8, x10]
ldr q2, [x8]
add x8, x8, #16
cmeq v1.16b, v1.16b, #0
cmeq v0.16b, v0.16b, v2.16b
shrn v1.8b, v1.8h, #4
shrn v0.8b, v0.8h, #4
fmov x6, d1
fmov x5, d0
ubfiz x4, x2, #2, #4 // x4 = (x2 & 0xf) << 2
lsl x4, x13, x4 // take first half into account
orr x6, x6, x4 // introduce a null byte match
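// A NUL byte or the end of the buffer was found in an aligned chunk of b.
// If a mismatch occurs before it, report that; otherwise re-compare the
// bytes of a corresponding to b's chunk and return the difference at the
// first mismatching or terminating position.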
.Lnulfound2:
sub x8, x8, #16 // undo the second advance
.Lnulfound:
mov x4, x6
ubfiz x7, x9, #2, #4
lsl x6, x6, x7 // adjust NUL mask to indices
orn x5, x6, x5
cbnz x5, .Lmismatch
ldr q0, [x8, x9]
ldr q1, [x8, x10]
cmeq v1.16b, v0.16b, v1.16b
shrn v1.8b, v1.8h, #4
fmov x5, d1
orn x5, x4, x5
rbit x3, x5
clz x3, x3
lsr x5, x3, #2 // byte index of mismatch
add x10, x10, x8 // restore x10 pointer
add x8, x8, x9 // point to corresponding chunk
ldrb w4, [x8, x5]
ldrb w5, [x10, x5]
sub w0, w4, w5
ret
.p2align 4
.Lmismatch2:
sub x8, x8, #16 // undo the second advance
.Lmismatch:
rbit x3, x5
clz x3, x3 // index of mismatch
lsr x3, x3, #2
add x11, x8, x11
ldrb w4, [x8, x3]
ldrb w5, [x11, x3]
sub w0, w4, w5 // byte difference
ret
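// Swapped variant of the code above, entered when a&0xf < b&0xf: the
// roles of the two strings are exchanged, so the byte difference is
// negated on return.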
.p2align 4
.Lswapped:
add x12, x8, x9
ldr q0, [x12, #16] // load the part of a matching b's second chunk
sub x8, x8, x10
add x11, x8, x9
add x2, x2, x9 // limit is now relative to a's second chunk
neg x9, x9
cmeq v1.16b, v2.16b, #0 // NUL bytes in a's chunk?
cmeq v0.16b, v0.16b, v3.16b
shrn v1.8b, v1.8h, #4
shrn v0.8b, v0.8h, #4
fmov x6, d1
fmov x5, d0
add x10, x10, #32 // advance past the head and the current chunk
lsl x4, x2, #2
lsl x4, x13, x4
orr x3, x6, x4 // introduce a null byte match
cmp x2, #16
csel x6, x3, x6, lo
cbnz x6, .Lnulfound2s
mvn x5, x5
cbnz x5, .Lmismatch2s
sub x2, x2, #16
cmp x2, #32
b.lo .Ltails
.p2align 4
0:
ldr q0, [x10, x11]
ldr q1, [x10, x8]
ldr q2, [x10]
cmeq v1.16b, v1.16b, #0
cmeq v0.16b, v0.16b, v2.16b
shrn v1.8b, v1.8h, #4
shrn v0.8b, v0.8h, #4
fmov x6, d1
fmov x5, d0
cbnz x6, .Lnulfounds
mvn x5, x5
cbnz x5, .Lmismatchs
add x10, x10, #16
ldr q0, [x10, x11]
ldr q1, [x10, x8]
ldr q2, [x10]
add x10, x10, #16
cmeq v1.16b, v1.16b, #0
cmeq v0.16b, v0.16b, v2.16b
shrn v1.8b, v1.8h, #4
shrn v0.8b, v0.8h, #4
fmov x6, d1
fmov x5, d0
cbnz x6, .Lnulfound2s
mvn x5, x5
cbnz x5, .Lmismatch2s
sub x2, x2, #32
cmp x2, #32
b.hs 0b
.Ltails:
ldr q0, [x10, x11]
ldr q1, [x10, x8]
ldr q2, [x10]
cmeq v1.16b, v1.16b, #0
cmeq v0.16b, v0.16b, v2.16b
shrn v1.8b, v1.8h, #4
shrn v0.8b, v0.8h, #4
fmov x6, d1
fmov x5, d0
lsl x4, x2, #2
lsl x4, x13, x4
orr x3, x6, x4 // introduce a null byte match
cmp x2, #16
csel x6, x3, x6, lo
cbnz x6, .Lnulfounds
mvn x5, x5
cbnz x5, .Lmismatchs
add x10, x10, #16
ldr q0, [x10, x11]
ldr q1, [x10, x8]
ldr q2, [x10]
add x10, x10, #16
cmeq v1.16b, v1.16b, #0
cmeq v0.16b, v0.16b, v2.16b
shrn v1.8b, v1.8h, #4
shrn v0.8b, v0.8h, #4
fmov x6, d1
fmov x5, d0
ubfiz x4, x2, #2, #4 // x4 = (x2 & 0xf) << 2
lsl x4, x13, x4
orr x6, x6, x4 // introduce a null byte match
.Lnulfound2s:
sub x10, x10, #16 // undo the second advance
.Lnulfounds:
mov x4, x6
ubfiz x7, x9, #2, #4
lsl x6, x6, x7
orn x5, x6, x5
cbnz x5, .Lmismatchs
ldr q0, [x10, x9]
ldr q1, [x10, x8]
cmeq v1.16b, v0.16b, v1.16b
shrn v1.8b, v1.8h, #4
fmov x5, d1
orn x5, x4, x5
rbit x3, x5
clz x3, x3
lsr x5, x3, #2
add x11, x10, x8
add x10, x10, x9
ldrb w4, [x10, x5]
ldrb w5, [x11, x5]
sub w0, w5, w4
ret
.p2align 4
.Lmismatch2s:
sub x10, x10, #16 // undo the second advance
.Lmismatchs:
rbit x3, x5
clz x3, x3
lsr x3, x3, #2
add x11, x10, x11
ldrb w4, [x10, x3]
ldrb w5, [x11, x3]
sub w0, w5, w4
ret
.p2align 4
.Lempty:
eor x0, x0, x0
ret
END(__strncmp)
.section .rodata
.p2align 4
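// Loading 16 bytes at shift_data + i yields the indices i..15 followed by
// 0xff bytes; used with TBL this shifts an aligned head down by i bytes
// and fills the tail with zeroes (TBL writes 0 for out of range indices),
// which act as artificial NUL bytes.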
shift_data:
.byte 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
.fill 16, 1, -1
.size shift_data, .-shift_data