/*
 *  arch/xtensa/lib/memset.S
 *
 *  ANSI C standard library function memset
 *  (Well, almost.  .fixup code might return zero.)
 *
 *  This file is subject to the terms and conditions of the GNU General
 *  Public License.  See the file "COPYING" in the main directory of
 *  this archive for more details.
 *
 *  Copyright (C) 2002 Tensilica Inc.
 */

#include <variant/core.h>

/*
 * void *memset(void *dst, int c, size_t length)
 *
 * The algorithm is as follows:
 *   Create a word with c in all byte positions.
 *   If the destination is aligned,
 *     do 16B chunks with a loop, and then finish up with
 *     8B, 4B, 2B, and 1B stores conditional on the length.
 *   If destination is unaligned, align it by conditionally
 *     setting 1B and 2B and then go to aligned case.
 *   This code tries to use fall-through branches for the common
 *     case of an aligned destination (except for the branches to
 *     the alignment labels).
 */

/* Load or store instructions that may cause exceptions use the EX macro:
 * the store gets a local label, and an __ex_table entry maps that address
 * to the fixup handler so a fault lands in `handler` instead of oopsing. */

#define EX(insn,reg1,reg2,offset,handler)	\
9:	insn	reg1, reg2, offset;		\
	.section __ex_table, "a";		\
	.word	9b, handler;			\
	.previous


.text
.align	4
.global	memset
.type	memset,@function
memset:
	entry	sp, 16		# minimal stack frame (windowed ABI)
	# a2/ dst, a3/ c, a4/ length
	extui	a3, a3, 0, 8	# mask to just 8 bits
	slli	a7, a3, 8	# duplicate character in all bytes of word
	or	a3, a3, a7	# ...
	slli	a7, a3, 16	# ...
	or	a3, a3, a7	# ... a3 now = c in all four byte lanes
	mov	a5, a2		# copy dst so that a2 is return value
	movi	a6, 3		# for alignment tests
	bany	a2, a6, .Ldstunaligned # if dst is unaligned
.L0:	# return here from .Ldstunaligned when dst is aligned
	srli	a7, a4, 4	# number of loop iterations with 16B
				# per iteration
	bnez	a4, .Laligned
	retw			# length == 0: nothing to do

/*
 * Destination is word-aligned.
 */
	# set 16 bytes per iteration for word-aligned dst
	.align	4		# 1 mod 4 alignment for LOOPNEZ
	.byte	0		# (0 mod 4 alignment for LBEG)
.Laligned:
#if XCHAL_HAVE_LOOPS
	loopnez	a7, .Loop1done	# zero-overhead loop: a7 iterations, skip if 0
#else /* !XCHAL_HAVE_LOOPS */
	beqz	a7, .Loop1done
	slli	a6, a7, 4
	add	a6, a6, a5	# a6 = end of last 16B chunk
#endif /* !XCHAL_HAVE_LOOPS */
.Loop1:
	EX(s32i, a3, a5,  0, memset_fixup)
	EX(s32i, a3, a5,  4, memset_fixup)
	EX(s32i, a3, a5,  8, memset_fixup)
	EX(s32i, a3, a5, 12, memset_fixup)
	addi	a5, a5, 16
#if !XCHAL_HAVE_LOOPS
	blt	a5, a6, .Loop1
#endif /* !XCHAL_HAVE_LOOPS */
.Loop1done:
	# finish the remaining 0..15 bytes, testing length bits 3..0
	bbci.l	a4, 3, .L2
	# set 8 bytes
	EX(s32i, a3, a5,  0, memset_fixup)
	EX(s32i, a3, a5,  4, memset_fixup)
	addi	a5, a5,  8
.L2:
	bbci.l	a4, 2, .L3
	# set 4 bytes
	EX(s32i, a3, a5,  0, memset_fixup)
	addi	a5, a5,  4
.L3:
	bbci.l	a4, 1, .L4
	# set 2 bytes
	EX(s16i, a3, a5,  0, memset_fixup)
	addi	a5, a5,  2
.L4:
	bbci.l	a4, 0, .L5
	# set 1 byte
	EX(s8i, a3, a5, 0, memset_fixup)
.L5:
.Lret1:
	retw

/*
 * Destination is unaligned
 */

.Ldstunaligned:
	bltui	a4, 8, .Lbyteset	# do short copies byte by byte
	bbci.l	a5, 0, .L20		# branch if dst alignment half-aligned
	# dst is only byte aligned
	# set 1 byte
	EX(s8i, a3, a5, 0, memset_fixup)
	addi	a5, a5, 1
	addi	a4, a4, -1
	# now retest if dst aligned
	bbci.l	a5, 1, .L0	# if now aligned, return to main algorithm
.L20:
	# dst half-aligned
	# set 2 bytes
	EX(s16i, a3, a5, 0, memset_fixup)
	addi	a5, a5, 2
	addi	a4, a4, -2
	j	.L0	# dst is now aligned, return to main algorithm

/*
 * Byte by byte set
 */
	.align	4
	.byte	0		# 1 mod 4 alignment for LOOPNEZ
				# (0 mod 4 alignment for LBEG)
.Lbyteset:
#if XCHAL_HAVE_LOOPS
	loopnez	a4, .Lbytesetdone
#else /* !XCHAL_HAVE_LOOPS */
	beqz	a4, .Lbytesetdone
	add	a6, a5, a4	# a6 = ending address
#endif /* !XCHAL_HAVE_LOOPS */
.Lbyteloop:
	EX(s8i, a3, a5, 0, memset_fixup)
	addi	a5, a5, 1
#if !XCHAL_HAVE_LOOPS
	blt	a5, a6, .Lbyteloop
#endif /* !XCHAL_HAVE_LOOPS */
.Lbytesetdone:
	retw


	.section .fixup, "ax"
	.align	4

/* We return zero if a failure occurred. */

memset_fixup:
	movi	a2, 0
	retw