/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Divide a 64-bit unsigned number by a 32-bit unsigned number.
 * This routine assumes that the top 32 bits of the dividend are
 * non-zero to start with.
 * On entry, r3 points to the dividend, which get overwritten with
 * the 64-bit quotient, and r4 contains the divisor.
 * On exit, r3 contains the remainder.
 *
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 */
#include "ppc_asm.h"

/*
 * __div64_32(u64 *dividend, u32 divisor)
 * In:    r3 = pointer to 64-bit dividend (big-endian: MSW at 0(r3), LSW at 4(r3))
 *        r4 = 32-bit divisor
 * Out:   *r3 = 64-bit quotient (stored back over the dividend)
 *        r3  = 32-bit remainder
 * Clobbers: r0, r5-r11, cr0
 *
 * Strategy: r7 = quotient.hi, r8 = quotient.lo accumulator.  While the
 * remaining dividend.hi (r5) is non-zero, compute an under-estimate of
 * dividend/divisor by shifting both right so the (reduced) dividend fits
 * in 32 bits, divide, then subtract estimate*divisor from the 64-bit
 * dividend with a subfc/subfe. carry chain and loop until dividend.hi == 0.
 */
	.globl	__div64_32
__div64_32:
	lwz	r5,0(r3)	# get the dividend into r5/r6
	lwz	r6,4(r3)
	cmplw	r5,r4
	li	r7,0
	li	r8,0
	blt	1f
	divwu	r7,r5,r4	# if dividend.hi >= divisor,
	mullw	r0,r7,r4	# quotient.hi = dividend.hi / divisor
	subf.	r5,r0,r5	# dividend.hi %= divisor
	beq	3f		# dividend.hi now 0 -> just 32-bit work left

1:	mr	r11,r5		# here dividend.hi != 0
	andis.	r0,r5,0xc000	# top 2 bits set? then shifted dividend fits as-is
	bne	2f
	cntlzw	r0,r5		# we are shifting the dividend right
	li	r10,-1		# to make it < 2^32, and shifting
	srw	r10,r10,r0	# the divisor right the same amount,
	addc	r9,r4,r10	# rounding up (so the estimate cannot
	andc	r11,r6,r10	# ever be too large, only too small)
	andc	r9,r9,r10
	addze	r9,r9		# r9 = ceil(divisor >> (32 - r0)), as rotate count r0
	or	r11,r5,r11
	rotlw	r9,r9,r0	# align shifted divisor
	rotlw	r11,r11,r0	# align shifted dividend (hi | kept low bits)
	divwu	r11,r11,r9	# then we divide the shifted quantities
2:	mullw	r10,r11,r4	# to get an estimate of the quotient,
	mulhwu	r9,r11,r4	# multiply the estimate by the divisor,
	subfc	r6,r10,r6	# subtract the 64-bit product from the dividend
	add	r8,r8,r11	# and add the estimate to the accumulated
	subfe.	r5,r9,r5	# quotient (borrow propagates via CA)
	bne	1b		# repeat while dividend.hi != 0
3:	cmplw	r6,r4
	blt	4f
	divwu	r0,r6,r4	# perform the remaining 32-bit division
	mullw	r10,r0,r4	# and get the remainder
	add	r8,r8,r0
	subf	r6,r10,r6
4:	stw	r7,0(r3)	# return the quotient in *r3
	stw	r8,4(r3)
	mr	r3,r6		# return the remainder in r3
	blr

/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 has 64 bit value (R3 = MSW, R4 = LSW)
 * R5 has shift count
 * result in R3/R4
 *
 * ashrdi3: arithmetic right shift (sign propagation)
 * lshrdi3: logical right shift
 * ashldi3: left shift
 *
 * These are branch-free: they rely on PPC slw/srw treating shift
 * amounts of 32-63 as producing zero, so the "count < 32" and
 * "count >= 32" contributions can simply be OR-ed together.
 */
	.globl	__ashrdi3
__ashrdi3:
	# Arithmetic right shift of r3:r4 by r5 (0-63); clobbers r6-r8, cr0.
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

	.globl	__ashldi3
__ashldi3:
	# Left shift of r3:r4 by r5 (0-63); clobbers r6-r7, cr0.
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr

	.globl	__lshrdi3
__lshrdi3:
	# Logical right shift of r3:r4 by r5 (0-63); clobbers r6-r7, cr0.
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr