Path: blob/main/lib/libc/arm/aeabi/aeabi_vfp_double.S
/*
 * Copyright (C) 2013 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/asm.h>
#include "aeabi_vfp.h"

.fpu	vfp
.syntax	unified

/*
 * AEABI_ENTRY/AEABI_END and the LOAD_DREG/UNLOAD_DREG/UNLOAD_SREG macros
 * come from aeabi_vfp.h; they move double arguments and results between
 * core-register pairs and VFP registers as the build's calling convention
 * requires.
 *
 * After vcmp/vcmpe + vmrs, the flags encode the IEEE comparison result:
 * eq = equal, mi = less than, gt = greater than, vs = unordered (at
 * least one operand is a NaN).  The condition codes below are chosen so
 * that an unordered result returns 0 everywhere except __aeabi_dcmpun.
 */

/* void __aeabi_cdcmpeq(double, double) */
AEABI_ENTRY(cdcmpeq)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cdcmpeq)

/* void __aeabi_cdcmple(double, double) */
AEABI_ENTRY(cdcmple)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	/* vcmpe, unlike vcmp, raises Invalid Operation on NaN operands. */
	vcmpe.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cdcmple)

/* void __aeabi_cdrcmple(double, double) */
AEABI_ENTRY(cdrcmple)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	/* Reversed compare: operands are swapped relative to cdcmple. */
	vcmpe.f64	d1, d0
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cdrcmple)

/* int __aeabi_dcmpeq(double, double) */
AEABI_ENTRY(dcmpeq)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	ite	ne
	movne	r0, #0
	moveq	r0, #1
	RET
AEABI_END(dcmpeq)

/* int __aeabi_dcmplt(double, double) */
AEABI_ENTRY(dcmplt)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	ite	cs
	movcs	r0, #0
	movcc	r0, #1
	RET
AEABI_END(dcmplt)

/* int __aeabi_dcmple(double, double) */
AEABI_ENTRY(dcmple)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	ite	hi
	movhi	r0, #0
	movls	r0, #1
	RET
AEABI_END(dcmple)

/* int __aeabi_dcmpge(double, double) */
AEABI_ENTRY(dcmpge)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	ite	lt
	movlt	r0, #0
	movge	r0, #1
	RET
AEABI_END(dcmpge)

/* int __aeabi_dcmpgt(double, double) */
AEABI_ENTRY(dcmpgt)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	ite	le
	movle	r0, #0
	movgt	r0, #1
	RET
AEABI_END(dcmpgt)

/* int __aeabi_dcmpun(double, double) */
AEABI_ENTRY(dcmpun)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	ite	vc
	movvc	r0, #0
	movvs	r0, #1
	RET
AEABI_END(dcmpun)

/* int __aeabi_d2iz(double) */
AEABI_ENTRY(d2iz)
	LOAD_DREG(d0, r0, r1)
#if 0
	/*
	 * This should be the correct instruction, but binutils incorrectly
	 * encodes it as the version that uses FPSCR to determine the
	 * rounding.  When binutils is fixed we can use this again.
	 */
	vcvt.s32.f64	s0, d0
#else
	ftosizd	s0, d0
#endif
	vmov	r0, s0
	RET
AEABI_END(d2iz)

/* float __aeabi_d2f(double) */
AEABI_ENTRY(d2f)
	LOAD_DREG(d0, r0, r1)
	vcvt.f32.f64	s0, d0
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(d2f)

/* double __aeabi_i2d(int) */
AEABI_ENTRY(i2d)
	vmov	s0, r0
	vcvt.f64.s32	d0, s0
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(i2d)

/* double __aeabi_dadd(double, double) */
AEABI_ENTRY(dadd)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vadd.f64	d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(dadd)

/* double __aeabi_ddiv(double, double) */
AEABI_ENTRY(ddiv)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vdiv.f64	d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(ddiv)

/* double __aeabi_dmul(double, double) */
AEABI_ENTRY(dmul)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vmul.f64	d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(dmul)

/* double __aeabi_dsub(double, double) */
AEABI_ENTRY(dsub)
	LOAD_DREG(d0, r0, r1)
	LOAD_DREG(d1, r2, r3)
	vsub.f64	d0, d0, d1
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(dsub)

.section .note.GNU-stack,"",%progbits
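
These routines implement the double-precision helper functions defined by the ARM run-time ABI (RTABI). Code built for the soft-float ABI executes no VFP instructions directly; the compiler instead lowers double arithmetic and comparisons to calls to these __aeabi_* routines, and this file supplies VFP-backed implementations of them. Below is a minimal sketch of how a caller reaches them, assuming a soft-float build (e.g. cc -O2 -mfloat-abi=soft -S example.c on an arm target); the function names average and less_than are illustrative and not part of this file.

	/* example.c -- inspect the generated assembly for the libcalls. */
	double average(double a, double b)
	{
		/* "+" lowers to a call to __aeabi_dadd and "/" to
		 * __aeabi_ddiv; each double argument travels in a
		 * core-register pair (r0:r1, r2:r3). */
		return (a + b) / 2.0;
	}

	int less_than(double a, double b)
	{
		/* "<" lowers to __aeabi_dcmplt, which returns 0 when
		 * either operand is a NaN (unordered), matching IEEE
		 * comparison semantics. */
		return a < b;
	}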