Path: blob/master/dep/vixl/src/aarch32/macro-assembler-aarch32.cc
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may
//     be used to endorse or promote products derived from this software
//     without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#include "aarch32/macro-assembler-aarch32.h"

#define STRINGIFY(x) #x
#define TOSTRING(x) STRINGIFY(x)

#define CONTEXT_SCOPE \
  ContextScope context(this, __FILE__ ":" TOSTRING(__LINE__))

namespace vixl {
namespace aarch32 {

ExactAssemblyScopeWithoutPoolsCheck::ExactAssemblyScopeWithoutPoolsCheck(
    MacroAssembler* masm, size_t size, SizePolicy size_policy)
    : ExactAssemblyScope(masm,
                         size,
                         size_policy,
                         ExactAssemblyScope::kIgnorePools) {}

void UseScratchRegisterScope::Open(MacroAssembler* masm) {
  VIXL_ASSERT(masm_ == NULL);
  VIXL_ASSERT(masm != NULL);
  masm_ = masm;

  old_available_ = masm_->GetScratchRegisterList()->GetList();
  old_available_vfp_ = masm_->GetScratchVRegisterList()->GetList();

  parent_ = masm->GetCurrentScratchRegisterScope();
  masm->SetCurrentScratchRegisterScope(this);
}


void UseScratchRegisterScope::Close() {
  if (masm_ != NULL) {
    // Ensure that scopes nest perfectly, and do not outlive their parents.
    // This is a run-time check because the order of destruction of objects in
    // the _same_ scope is implementation-defined, and is likely to change in
    // optimised builds.
    VIXL_CHECK(masm_->GetCurrentScratchRegisterScope() == this);
    masm_->SetCurrentScratchRegisterScope(parent_);

    masm_->GetScratchRegisterList()->SetList(old_available_);
    masm_->GetScratchVRegisterList()->SetList(old_available_vfp_);

    masm_ = NULL;
  }
}


bool UseScratchRegisterScope::IsAvailable(const Register& reg) const {
  VIXL_ASSERT(masm_ != NULL);
  VIXL_ASSERT(reg.IsValid());
  return masm_->GetScratchRegisterList()->Includes(reg);
}


bool UseScratchRegisterScope::IsAvailable(const VRegister& reg) const {
  VIXL_ASSERT(masm_ != NULL);
  VIXL_ASSERT(reg.IsValid());
  return masm_->GetScratchVRegisterList()->IncludesAllOf(reg);
}


Register UseScratchRegisterScope::Acquire() {
  VIXL_ASSERT(masm_ != NULL);
  Register reg = masm_->GetScratchRegisterList()->GetFirstAvailableRegister();
  VIXL_CHECK(reg.IsValid());
  masm_->GetScratchRegisterList()->Remove(reg);
  return reg;
}


VRegister UseScratchRegisterScope::AcquireV(unsigned size_in_bits) {
  switch (size_in_bits) {
    case kSRegSizeInBits:
      return AcquireS();
    case kDRegSizeInBits:
      return AcquireD();
    case kQRegSizeInBits:
      return AcquireQ();
    default:
      VIXL_UNREACHABLE();
      return NoVReg;
  }
}


QRegister UseScratchRegisterScope::AcquireQ() {
  VIXL_ASSERT(masm_ != NULL);
  QRegister reg =
      masm_->GetScratchVRegisterList()->GetFirstAvailableQRegister();
  VIXL_CHECK(reg.IsValid());
  masm_->GetScratchVRegisterList()->Remove(reg);
  return reg;
}


DRegister UseScratchRegisterScope::AcquireD() {
  VIXL_ASSERT(masm_ != NULL);
  DRegister reg =
      masm_->GetScratchVRegisterList()->GetFirstAvailableDRegister();
  VIXL_CHECK(reg.IsValid());
  masm_->GetScratchVRegisterList()->Remove(reg);
  return reg;
}


SRegister UseScratchRegisterScope::AcquireS() {
  VIXL_ASSERT(masm_ != NULL);
  SRegister reg =
      masm_->GetScratchVRegisterList()->GetFirstAvailableSRegister();
  VIXL_CHECK(reg.IsValid());
  masm_->GetScratchVRegisterList()->Remove(reg);
  return reg;
}


void UseScratchRegisterScope::Release(const Register& reg) {
  VIXL_ASSERT(masm_ != NULL);
  VIXL_ASSERT(reg.IsValid());
  VIXL_ASSERT(!masm_->GetScratchRegisterList()->Includes(reg));
  masm_->GetScratchRegisterList()->Combine(reg);
}


void UseScratchRegisterScope::Release(const VRegister& reg) {
  VIXL_ASSERT(masm_ != NULL);
  VIXL_ASSERT(reg.IsValid());
  VIXL_ASSERT(!masm_->GetScratchVRegisterList()->IncludesAliasOf(reg));
  masm_->GetScratchVRegisterList()->Combine(reg);
}


void UseScratchRegisterScope::Include(const RegisterList& list) {
  VIXL_ASSERT(masm_ != NULL);
  RegisterList excluded_registers(sp, lr, pc);
  uint32_t mask = list.GetList() & ~excluded_registers.GetList();
  RegisterList* available = masm_->GetScratchRegisterList();
  available->SetList(available->GetList() | mask);
}


void UseScratchRegisterScope::Include(const VRegisterList& list) {
  VIXL_ASSERT(masm_ != NULL);
  VRegisterList* available = masm_->GetScratchVRegisterList();
  available->SetList(available->GetList() | list.GetList());
}


void UseScratchRegisterScope::Exclude(const RegisterList& list) {
  VIXL_ASSERT(masm_ != NULL);
  RegisterList* available = masm_->GetScratchRegisterList();
  available->SetList(available->GetList() & ~list.GetList());
}


void UseScratchRegisterScope::Exclude(const VRegisterList& list) {
  VIXL_ASSERT(masm_ != NULL);
  VRegisterList* available = masm_->GetScratchVRegisterList();
  available->SetList(available->GetList() & ~list.GetList());
}


void UseScratchRegisterScope::Exclude(const Operand& operand) {
  if (operand.IsImmediateShiftedRegister()) {
    Exclude(operand.GetBaseRegister());
  } else if (operand.IsRegisterShiftedRegister()) {
    Exclude(operand.GetBaseRegister(), operand.GetShiftRegister());
  } else {
    VIXL_ASSERT(operand.IsImmediate());
  }
}


void UseScratchRegisterScope::ExcludeAll() {
  VIXL_ASSERT(masm_ != NULL);
  masm_->GetScratchRegisterList()->SetList(0);
  masm_->GetScratchVRegisterList()->SetList(0);
}
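

// Illustrative usage sketch (not part of the original sources; the register
// choice is hypothetical). A typical emission helper acquires temporaries
// through a scope so that the available scratch list is restored on exit:
//
//   void EmitHelper(MacroAssembler* masm) {
//     UseScratchRegisterScope temps(masm);
//     Register tmp = temps.Acquire();  // E.g. ip, if it is on the list.
//     masm->Mov(tmp, 0x12345678);
//     // ...
//   }  // 'tmp' is returned to the scratch list when 'temps' is destroyed.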


void MacroAssembler::EnsureEmitPoolsFor(size_t size_arg) {
  // We skip the check when the pools are blocked.
  if (ArePoolsBlocked()) return;

  VIXL_ASSERT(IsUint32(size_arg));
  uint32_t size = static_cast<uint32_t>(size_arg);

  if (pool_manager_.MustEmit(GetCursorOffset(), size)) {
    int32_t new_pc = pool_manager_.Emit(this, GetCursorOffset(), size);
    VIXL_ASSERT(new_pc == GetCursorOffset());
    USE(new_pc);
  }
}


void MacroAssembler::HandleOutOfBoundsImmediate(Condition cond,
                                                Register tmp,
                                                uint32_t imm) {
  if (IsUintN(16, imm)) {
    CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
    mov(cond, tmp, imm & 0xffff);
    return;
  }
  if (IsUsingT32()) {
    if (ImmediateT32::IsImmediateT32(~imm)) {
      CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
      mvn(cond, tmp, ~imm);
      return;
    }
  } else {
    if (ImmediateA32::IsImmediateA32(~imm)) {
      CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
      mvn(cond, tmp, ~imm);
      return;
    }
  }
  CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
  mov(cond, tmp, imm & 0xffff);
  movt(cond, tmp, imm >> 16);
}
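

// Illustrative expansions (with tmp == ip, roughly what the code above
// produces; exact encodings depend on the instruction set in use):
//   HandleOutOfBoundsImmediate(al, ip, 0x0000abcd)  ->  mov  ip, #0xabcd
//   HandleOutOfBoundsImmediate(al, ip, 0xffffff00)  ->  mvn  ip, #0xff
//   HandleOutOfBoundsImmediate(al, ip, 0x12345678)  ->  movw ip, #0x5678
//                                                       movt ip, #0x1234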


MemOperand MacroAssembler::MemOperandComputationHelper(
    Condition cond,
    Register scratch,
    Register base,
    uint32_t offset,
    uint32_t extra_offset_mask) {
  VIXL_ASSERT(!AliasesAvailableScratchRegister(scratch));
  VIXL_ASSERT(!AliasesAvailableScratchRegister(base));
  VIXL_ASSERT(allow_macro_instructions_);
  VIXL_ASSERT(OutsideITBlock());

  // Check for the simple pass-through case.
  if ((offset & extra_offset_mask) == offset) return MemOperand(base, offset);

  MacroEmissionCheckScope guard(this);
  ITScope it_scope(this, &cond, guard);

  uint32_t load_store_offset = offset & extra_offset_mask;
  uint32_t add_offset = offset & ~extra_offset_mask;
  if ((add_offset != 0) && (IsModifiedImmediate(offset) ||
                            IsModifiedImmediate(UnsignedNegate(offset)))) {
    load_store_offset = 0;
    add_offset = offset;
  }

  if (base.IsPC()) {
    // Special handling for PC bases. We must read the PC in the first
    // instruction (and only in that instruction), and we must also take care
    // to keep the same address calculation as loads and stores. For T32, that
    // means using something like ADR, which uses AlignDown(PC, 4).

    // We don't handle positive offsets from PC because the intention is not
    // clear; does the user expect the offset from the current
    // GetCursorOffset(), or to allow a certain amount of space after the
    // instruction?
    VIXL_ASSERT((offset & 0x80000000) != 0);
    if (IsUsingT32()) {
      // T32: make the first instruction "SUB (immediate, from PC)" -- an
      // alias of ADR -- to get behaviour like loads and stores. This ADR can
      // handle at least as much offset as the load_store_offset, so it can
      // replace it.

      uint32_t sub_pc_offset = UnsignedNegate(offset) & 0xfff;
      load_store_offset = (offset + sub_pc_offset) & extra_offset_mask;
      add_offset = (offset + sub_pc_offset) & ~extra_offset_mask;

      ExactAssemblyScope scope(this, k32BitT32InstructionSizeInBytes);
      sub(cond, scratch, base, sub_pc_offset);

      if (add_offset == 0) return MemOperand(scratch, load_store_offset);

      // The rest of the offset can be generated in the usual way.
      base = scratch;
    }
    // A32 can use any SUB instruction, so we don't have to do anything
    // special here except to ensure that we read the PC first.
  }

  add(cond, scratch, base, add_offset);
  return MemOperand(scratch, load_store_offset);
}
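

// Worked example (illustrative): with scratch == ip, base == r1,
// offset == 0x12345 and extra_offset_mask == 0xfff, the helper splits the
// offset into add_offset == 0x12000 (a modified immediate) and
// load_store_offset == 0x345, so a caller can emit:
//   add ip, r1, #0x12000
//   ldr r0, [ip, #0x345]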


uint32_t MacroAssembler::GetOffsetMask(InstructionType type,
                                       AddrMode addrmode) {
  switch (type) {
    case kLdr:
    case kLdrb:
    case kStr:
    case kStrb:
      if (IsUsingA32() || (addrmode == Offset)) {
        return 0xfff;
      } else {
        return 0xff;
      }
    case kLdrsb:
    case kLdrh:
    case kLdrsh:
    case kStrh:
      if (IsUsingT32() && (addrmode == Offset)) {
        return 0xfff;
      } else {
        return 0xff;
      }
    case kVldr:
    case kVstr:
      return 0x3fc;
    case kLdrd:
    case kStrd:
      if (IsUsingA32()) {
        return 0xff;
      } else {
        return 0x3fc;
      }
    default:
      VIXL_UNREACHABLE();
      return 0;
  }
}
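

// For instance, GetOffsetMask(kLdr, Offset) is 0xfff for both A32 and T32,
// whereas GetOffsetMask(kLdr, PostIndex) is 0xfff for A32 but only 0xff for
// T32, reflecting the smaller immediate field of the T32 post-indexed
// encodings.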


HARDFLOAT void PrintfTrampolineRRRR(
    const char* format, uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
  printf(format, a, b, c, d);
}


HARDFLOAT void PrintfTrampolineRRRD(
    const char* format, uint32_t a, uint32_t b, uint32_t c, double d) {
  printf(format, a, b, c, d);
}


HARDFLOAT void PrintfTrampolineRRDR(
    const char* format, uint32_t a, uint32_t b, double c, uint32_t d) {
  printf(format, a, b, c, d);
}


HARDFLOAT void PrintfTrampolineRRDD(
    const char* format, uint32_t a, uint32_t b, double c, double d) {
  printf(format, a, b, c, d);
}


HARDFLOAT void PrintfTrampolineRDRR(
    const char* format, uint32_t a, double b, uint32_t c, uint32_t d) {
  printf(format, a, b, c, d);
}


HARDFLOAT void PrintfTrampolineRDRD(
    const char* format, uint32_t a, double b, uint32_t c, double d) {
  printf(format, a, b, c, d);
}


HARDFLOAT void PrintfTrampolineRDDR(
    const char* format, uint32_t a, double b, double c, uint32_t d) {
  printf(format, a, b, c, d);
}


HARDFLOAT void PrintfTrampolineRDDD(
    const char* format, uint32_t a, double b, double c, double d) {
  printf(format, a, b, c, d);
}


HARDFLOAT void PrintfTrampolineDRRR(
    const char* format, double a, uint32_t b, uint32_t c, uint32_t d) {
  printf(format, a, b, c, d);
}


HARDFLOAT void PrintfTrampolineDRRD(
    const char* format, double a, uint32_t b, uint32_t c, double d) {
  printf(format, a, b, c, d);
}


HARDFLOAT void PrintfTrampolineDRDR(
    const char* format, double a, uint32_t b, double c, uint32_t d) {
  printf(format, a, b, c, d);
}


HARDFLOAT void PrintfTrampolineDRDD(
    const char* format, double a, uint32_t b, double c, double d) {
  printf(format, a, b, c, d);
}


HARDFLOAT void PrintfTrampolineDDRR(
    const char* format, double a, double b, uint32_t c, uint32_t d) {
  printf(format, a, b, c, d);
}


HARDFLOAT void PrintfTrampolineDDRD(
    const char* format, double a, double b, uint32_t c, double d) {
  printf(format, a, b, c, d);
}


HARDFLOAT void PrintfTrampolineDDDR(
    const char* format, double a, double b, double c, uint32_t d) {
  printf(format, a, b, c, d);
}


HARDFLOAT void PrintfTrampolineDDDD(
    const char* format, double a, double b, double c, double d) {
  printf(format, a, b, c, d);
}
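

// The trampoline names encode the four argument kinds after the format
// string: 'R' for a 32-bit core value and 'D' for a double, in argument
// order. Printf() below builds a 4-bit 'printf_type' selector where bit n
// set means argument n+1 is a double; e.g. a value of 2 (0b0010) selects
// PrintfTrampolineRDRR for a (core, double, core, core) argument list.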


void MacroAssembler::Printf(const char* format,
                            CPURegister reg1,
                            CPURegister reg2,
                            CPURegister reg3,
                            CPURegister reg4) {
  // Exclude all registers from the available scratch registers, so that we
  // are able to use ip below.
  // TODO: Refactor this function to use UseScratchRegisterScope for the
  // temporary registers below.
  UseScratchRegisterScope scratch(this);
  scratch.ExcludeAll();
  if (generate_simulator_code_) {
    PushRegister(reg4);
    PushRegister(reg3);
    PushRegister(reg2);
    PushRegister(reg1);
    Push(RegisterList(r0, r1));
    StringLiteral* format_literal =
        new StringLiteral(format, RawLiteral::kDeletedOnPlacementByPool);
    Adr(r0, format_literal);
    uint32_t args = (reg4.GetType() << 12) | (reg3.GetType() << 8) |
                    (reg2.GetType() << 4) | reg1.GetType();
    Mov(r1, args);
    Hvc(kPrintfCode);
    Pop(RegisterList(r0, r1));
    int size = reg4.GetRegSizeInBytes() + reg3.GetRegSizeInBytes() +
               reg2.GetRegSizeInBytes() + reg1.GetRegSizeInBytes();
    Drop(size);
  } else {
    // Generate on a native platform => 32-bit environment.
    // Preserve core registers r0-r3, r12, r14, plus r5, which is used below
    // for stack alignment.
    const uint32_t saved_registers_mask =
        kCallerSavedRegistersMask | (1 << r5.GetCode());
    Push(RegisterList(saved_registers_mask));
    // Push VFP registers.
    Vpush(Untyped64, DRegisterList(d0, 8));
    if (Has32DRegs()) Vpush(Untyped64, DRegisterList(d16, 16));
    // Find a register which has been saved and doesn't need to be printed.
    RegisterList available_registers(kCallerSavedRegistersMask);
    if (reg1.GetType() == CPURegister::kRRegister) {
      available_registers.Remove(Register(reg1.GetCode()));
    }
    if (reg2.GetType() == CPURegister::kRRegister) {
      available_registers.Remove(Register(reg2.GetCode()));
    }
    if (reg3.GetType() == CPURegister::kRRegister) {
      available_registers.Remove(Register(reg3.GetCode()));
    }
    if (reg4.GetType() == CPURegister::kRRegister) {
      available_registers.Remove(Register(reg4.GetCode()));
    }
    Register tmp = available_registers.GetFirstAvailableRegister();
    VIXL_ASSERT(tmp.GetType() == CPURegister::kRRegister);
    // Push the flags.
    Mrs(tmp, APSR);
    Push(tmp);
    Vmrs(RegisterOrAPSR_nzcv(tmp.GetCode()), FPSCR);
    Push(tmp);
    // Push the registers to print on the stack.
    PushRegister(reg4);
    PushRegister(reg3);
    PushRegister(reg2);
    PushRegister(reg1);
    int core_count = 1;
    int vfp_count = 0;
    uint32_t printf_type = 0;
    // Pop the registers to print and store them into r1-r3 and/or d0-d3.
    // Reg4 may stay on the stack if all the registers to print are core
    // registers.
    PreparePrintfArgument(reg1, &core_count, &vfp_count, &printf_type);
    PreparePrintfArgument(reg2, &core_count, &vfp_count, &printf_type);
    PreparePrintfArgument(reg3, &core_count, &vfp_count, &printf_type);
    PreparePrintfArgument(reg4, &core_count, &vfp_count, &printf_type);
    // Ensure that the stack is aligned on 8 bytes.
    And(r5, sp, 0x7);
    if (core_count == 5) {
      // One 32-bit argument (reg4) has been left on the stack => align the
      // stack before the argument.
      Pop(r0);
      Sub(sp, sp, r5);
      Push(r0);
    } else {
      Sub(sp, sp, r5);
    }
    // Select the right trampoline depending on the arguments.
    uintptr_t address;
    switch (printf_type) {
      case 0:
        address = reinterpret_cast<uintptr_t>(PrintfTrampolineRRRR);
        break;
      case 1:
        address = reinterpret_cast<uintptr_t>(PrintfTrampolineDRRR);
        break;
      case 2:
        address = reinterpret_cast<uintptr_t>(PrintfTrampolineRDRR);
        break;
      case 3:
        address = reinterpret_cast<uintptr_t>(PrintfTrampolineDDRR);
        break;
      case 4:
        address = reinterpret_cast<uintptr_t>(PrintfTrampolineRRDR);
        break;
      case 5:
        address = reinterpret_cast<uintptr_t>(PrintfTrampolineDRDR);
        break;
      case 6:
        address = reinterpret_cast<uintptr_t>(PrintfTrampolineRDDR);
        break;
      case 7:
        address = reinterpret_cast<uintptr_t>(PrintfTrampolineDDDR);
        break;
      case 8:
        address = reinterpret_cast<uintptr_t>(PrintfTrampolineRRRD);
        break;
      case 9:
        address = reinterpret_cast<uintptr_t>(PrintfTrampolineDRRD);
        break;
      case 10:
        address = reinterpret_cast<uintptr_t>(PrintfTrampolineRDRD);
        break;
      case 11:
        address = reinterpret_cast<uintptr_t>(PrintfTrampolineDDRD);
        break;
      case 12:
        address = reinterpret_cast<uintptr_t>(PrintfTrampolineRRDD);
        break;
      case 13:
        address = reinterpret_cast<uintptr_t>(PrintfTrampolineDRDD);
        break;
      case 14:
        address = reinterpret_cast<uintptr_t>(PrintfTrampolineRDDD);
        break;
      case 15:
        address = reinterpret_cast<uintptr_t>(PrintfTrampolineDDDD);
        break;
      default:
        VIXL_UNREACHABLE();
        address = reinterpret_cast<uintptr_t>(PrintfTrampolineRRRR);
        break;
    }
    StringLiteral* format_literal =
        new StringLiteral(format, RawLiteral::kDeletedOnPlacementByPool);
    Adr(r0, format_literal);
    Mov(ip, Operand::From(address));
    Blx(ip);
    // If register reg4 was left on the stack => skip it.
    if (core_count == 5) Drop(kRegSizeInBytes);
    // Restore the stack as it was before alignment.
    Add(sp, sp, r5);
    // Restore the flags.
    Pop(tmp);
    Vmsr(FPSCR, tmp);
    Pop(tmp);
    Msr(APSR_nzcvqg, tmp);
    // Restore the registers.
    if (Has32DRegs()) Vpop(Untyped64, DRegisterList(d16, 16));
    Vpop(Untyped64, DRegisterList(d0, 8));
    Pop(RegisterList(saved_registers_mask));
  }
}
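

// Illustrative usage sketch (register choices are the caller's; NoReg marks
// unused argument slots, which PushRegister() below treats as no-ops):
//   masm.Printf("x=%d, y=%f\n", r0, d0, NoReg, NoReg);
// On the simulator path this emits an HVC that the simulator intercepts;
// natively it saves the caller-saved state, marshals r0/d0 into the AAPCS
// argument slots, and calls the matching trampoline above.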


void MacroAssembler::PushRegister(CPURegister reg) {
  switch (reg.GetType()) {
    case CPURegister::kNoRegister:
      break;
    case CPURegister::kRRegister:
      Push(Register(reg.GetCode()));
      break;
    case CPURegister::kSRegister:
      Vpush(Untyped32, SRegisterList(SRegister(reg.GetCode())));
      break;
    case CPURegister::kDRegister:
      Vpush(Untyped64, DRegisterList(DRegister(reg.GetCode())));
      break;
    case CPURegister::kQRegister:
      VIXL_UNIMPLEMENTED();
      break;
  }
}


void MacroAssembler::PreparePrintfArgument(CPURegister reg,
                                           int* core_count,
                                           int* vfp_count,
                                           uint32_t* printf_type) {
  switch (reg.GetType()) {
    case CPURegister::kNoRegister:
      break;
    case CPURegister::kRRegister:
      VIXL_ASSERT(*core_count <= 4);
      if (*core_count < 4) Pop(Register(*core_count));
      *core_count += 1;
      break;
    case CPURegister::kSRegister:
      VIXL_ASSERT(*vfp_count < 4);
      *printf_type |= 1 << (*core_count + *vfp_count - 1);
      Vpop(Untyped32, SRegisterList(SRegister(*vfp_count * 2)));
      Vcvt(F64, F32, DRegister(*vfp_count), SRegister(*vfp_count * 2));
      *vfp_count += 1;
      break;
    case CPURegister::kDRegister:
      VIXL_ASSERT(*vfp_count < 4);
      *printf_type |= 1 << (*core_count + *vfp_count - 1);
      Vpop(Untyped64, DRegisterList(DRegister(*vfp_count)));
      *vfp_count += 1;
      break;
    case CPURegister::kQRegister:
      VIXL_UNIMPLEMENTED();
      break;
  }
}
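

// Worked example (illustrative): for Printf(format, r2, d1, NoReg, NoReg),
// reg1 is a core register (popped into r1; core_count goes 1 -> 2) and reg2
// is a double (popped into d0; vfp_count goes 0 -> 1), which sets
// printf_type bit 1 (value 2) and so selects PrintfTrampolineRDRR.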


void MacroAssembler::Delegate(InstructionType type,
                              InstructionCondROp instruction,
                              Condition cond,
                              Register rn,
                              const Operand& operand) {
  VIXL_ASSERT((type == kMovt) || (type == kSxtb16) || (type == kTeq) ||
              (type == kUxtb16));

  if (type == kMovt) {
    VIXL_ABORT_WITH_MSG("`Movt` expects a 16-bit immediate.\n");
  }

  // This delegate only supports teq with immediates.
  CONTEXT_SCOPE;
  if ((type == kTeq) && operand.IsImmediate()) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    HandleOutOfBoundsImmediate(cond, scratch, operand.GetImmediate());
    CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
    teq(cond, rn, scratch);
    return;
  }
  Assembler::Delegate(type, instruction, cond, rn, operand);
}
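

// For example (illustrative), Teq(r0, 0x12345678) has no direct encoding,
// so the delegate materialises the immediate first:
//   movw/movt scratch, #0x12345678
//   teq r0, scratch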


void MacroAssembler::Delegate(InstructionType type,
                              InstructionCondSizeROp instruction,
                              Condition cond,
                              EncodingSize size,
                              Register rn,
                              const Operand& operand) {
  CONTEXT_SCOPE;
  VIXL_ASSERT(size.IsBest());
  VIXL_ASSERT((type == kCmn) || (type == kCmp) || (type == kMov) ||
              (type == kMovs) || (type == kMvn) || (type == kMvns) ||
              (type == kSxtb) || (type == kSxth) || (type == kTst) ||
              (type == kUxtb) || (type == kUxth));
  if (IsUsingT32() && operand.IsRegisterShiftedRegister()) {
    VIXL_ASSERT((type != kMov) && (type != kMovs));
    InstructionCondRROp shiftop = NULL;
    switch (operand.GetShift().GetType()) {
      case LSL:
        shiftop = &Assembler::lsl;
        break;
      case LSR:
        shiftop = &Assembler::lsr;
        break;
      case ASR:
        shiftop = &Assembler::asr;
        break;
      case RRX:
        // A RegisterShiftedRegister operand cannot have a shift of type RRX.
        VIXL_UNREACHABLE();
        break;
      case ROR:
        shiftop = &Assembler::ror;
        break;
      default:
        VIXL_UNREACHABLE();
    }
    if (shiftop != NULL) {
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
      (this->*shiftop)(cond,
                       scratch,
                       operand.GetBaseRegister(),
                       operand.GetShiftRegister());
      (this->*instruction)(cond, size, rn, scratch);
      return;
    }
  }
  if (operand.IsImmediate()) {
    uint32_t imm = operand.GetImmediate();
    switch (type) {
      case kMov:
      case kMovs:
        if (!rn.IsPC()) {
          // Immediate is too large, but not using PC, so handle with mov{t}.
          HandleOutOfBoundsImmediate(cond, rn, imm);
          if (type == kMovs) {
            CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
            tst(cond, rn, rn);
          }
          return;
        } else if (type == kMov) {
          VIXL_ASSERT(IsUsingA32() || cond.Is(al));
          // Immediate is too large and using PC, so handle using a temporary
          // register.
          UseScratchRegisterScope temps(this);
          Register scratch = temps.Acquire();
          HandleOutOfBoundsImmediate(al, scratch, imm);
          CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
          bx(cond, scratch);
          return;
        }
        break;
      case kCmn:
      case kCmp:
        if (IsUsingA32() || !rn.IsPC()) {
          UseScratchRegisterScope temps(this);
          Register scratch = temps.Acquire();
          HandleOutOfBoundsImmediate(cond, scratch, imm);
          CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
          (this->*instruction)(cond, size, rn, scratch);
          return;
        }
        break;
      case kMvn:
      case kMvns:
        if (!rn.IsPC()) {
          UseScratchRegisterScope temps(this);
          Register scratch = temps.Acquire();
          HandleOutOfBoundsImmediate(cond, scratch, imm);
          CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
          (this->*instruction)(cond, size, rn, scratch);
          return;
        }
        break;
      case kTst:
        if (IsUsingA32() || !rn.IsPC()) {
          UseScratchRegisterScope temps(this);
          Register scratch = temps.Acquire();
          HandleOutOfBoundsImmediate(cond, scratch, imm);
          CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
          (this->*instruction)(cond, size, rn, scratch);
          return;
        }
        break;
      default:  // kSxtb, kSxth, kUxtb, kUxth
        break;
    }
  }
  Assembler::Delegate(type, instruction, cond, size, rn, operand);
}


void MacroAssembler::Delegate(InstructionType type,
                              InstructionCondRROp instruction,
                              Condition cond,
                              Register rd,
                              Register rn,
                              const Operand& operand) {
  if ((type == kSxtab) || (type == kSxtab16) || (type == kSxtah) ||
      (type == kUxtab) || (type == kUxtab16) || (type == kUxtah) ||
      (type == kPkhbt) || (type == kPkhtb)) {
    UnimplementedDelegate(type);
    return;
  }

  // This delegate only handles the following instructions.
  VIXL_ASSERT((type == kOrn) || (type == kOrns) || (type == kRsc) ||
              (type == kRscs));
  CONTEXT_SCOPE;

  // T32 does not support register shifted register operands, emulate it.
  if (IsUsingT32() && operand.IsRegisterShiftedRegister()) {
    InstructionCondRROp shiftop = NULL;
    switch (operand.GetShift().GetType()) {
      case LSL:
        shiftop = &Assembler::lsl;
        break;
      case LSR:
        shiftop = &Assembler::lsr;
        break;
      case ASR:
        shiftop = &Assembler::asr;
        break;
      case RRX:
        // A RegisterShiftedRegister operand cannot have a shift of type RRX.
        VIXL_UNREACHABLE();
        break;
      case ROR:
        shiftop = &Assembler::ror;
        break;
      default:
        VIXL_UNREACHABLE();
    }
    if (shiftop != NULL) {
      UseScratchRegisterScope temps(this);
      Register rm = operand.GetBaseRegister();
      Register rs = operand.GetShiftRegister();
      // Try to use rd as a scratch register. We can do this if it aliases rs
      // or rm (because we read them in the first instruction), but not rn.
      if (!rd.Is(rn)) temps.Include(rd);
      Register scratch = temps.Acquire();
      // TODO: The scope length was measured empirically. We should analyse
      // the worst-case size and add targeted tests.
      CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
      (this->*shiftop)(cond, scratch, rm, rs);
      (this->*instruction)(cond, rd, rn, scratch);
      return;
    }
  }

  // T32 does not have a Rsc instruction, negate the lhs input and turn it
  // into an Adc. Adc and Rsc are equivalent using a bitwise NOT:
  //   adc rd, rn, operand <-> rsc rd, NOT(rn), operand
  if (IsUsingT32() && ((type == kRsc) || (type == kRscs))) {
    // The RegisterShiftRegister case should have been handled above.
    VIXL_ASSERT(!operand.IsRegisterShiftedRegister());
    UseScratchRegisterScope temps(this);
    // Try to use rd as a scratch register. We can do this if it aliases rn
    // (because we read it in the first instruction), but not rm.
    temps.Include(rd);
    temps.Exclude(operand);
    Register negated_rn = temps.Acquire();
    {
      CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
      mvn(cond, negated_rn, rn);
    }
    if (type == kRsc) {
      CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
      adc(cond, rd, negated_rn, operand);
      return;
    }
    // TODO: We shouldn't have to specify how much space the next instruction
    // needs.
    CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
    adcs(cond, rd, negated_rn, operand);
    return;
  }

  if (operand.IsImmediate()) {
    // If the immediate can be encoded when inverted, turn Orn into Orr.
    // Otherwise rely on HandleOutOfBoundsImmediate to generate a series of
    // mov.
    int32_t imm = operand.GetSignedImmediate();
    if (((type == kOrn) || (type == kOrns)) && IsModifiedImmediate(~imm)) {
      CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
      switch (type) {
        case kOrn:
          orr(cond, rd, rn, ~imm);
          return;
        case kOrns:
          orrs(cond, rd, rn, ~imm);
          return;
        default:
          VIXL_UNREACHABLE();
          break;
      }
    }
  }

  // A32 does not have an Orn instruction, negate the rhs input and turn it
  // into an Orr.
  if (IsUsingA32() && ((type == kOrn) || (type == kOrns))) {
    // TODO: orn r0, r1, imm -> orr r0, r1, neg(imm) if doable
    //   mvn r0, r2
    //   orr r0, r1, r0
    Register scratch;
    UseScratchRegisterScope temps(this);
    // Try to use rd as a scratch register. We can do this if it aliases rs
    // or rm (because we read them in the first instruction), but not rn.
    if (!rd.Is(rn)) temps.Include(rd);
    scratch = temps.Acquire();
    {
      // TODO: We shouldn't have to specify how much space the next
      // instruction needs.
      CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
      mvn(cond, scratch, operand);
    }
    if (type == kOrns) {
      CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
      orrs(cond, rd, rn, scratch);
      return;
    }
    CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
    orr(cond, rd, rn, scratch);
    return;
  }

  if (operand.IsImmediate()) {
    UseScratchRegisterScope temps(this);
    // Allow using the destination as a scratch register if possible.
    if (!rd.Is(rn)) temps.Include(rd);
    Register scratch = temps.Acquire();
    int32_t imm = operand.GetSignedImmediate();
    HandleOutOfBoundsImmediate(cond, scratch, imm);
    CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
    (this->*instruction)(cond, rd, rn, scratch);
    return;
  }
  Assembler::Delegate(type, instruction, cond, rd, rn, operand);
}


void MacroAssembler::Delegate(InstructionType type,
                              InstructionCondSizeRL instruction,
                              Condition cond,
                              EncodingSize size,
                              Register rd,
                              Location* location) {
  VIXL_ASSERT((type == kLdr) || (type == kAdr));

  CONTEXT_SCOPE;
  VIXL_ASSERT(size.IsBest());

  if ((type == kLdr) && location->IsBound()) {
    CodeBufferCheckScope scope(this, 5 * kMaxInstructionSizeInBytes);
    UseScratchRegisterScope temps(this);
    temps.Include(rd);
    uint32_t mask = GetOffsetMask(type, Offset);
    ldr(rd,
        MemOperandComputationHelper(cond, temps.Acquire(), location, mask));
    return;
  }

  Assembler::Delegate(type, instruction, cond, size, rd, location);
}


bool MacroAssembler::GenerateSplitInstruction(
    InstructionCondSizeRROp instruction,
    Condition cond,
    Register rd,
    Register rn,
    uint32_t imm,
    uint32_t mask) {
  uint32_t high = imm & ~mask;
  if (!IsModifiedImmediate(high) && !rn.IsPC()) return false;
  // If high is a modified immediate, we can perform the operation with only
  // 2 instructions. Otherwise, if rn is PC, we want to avoid moving PC into
  // a temporary, so we also use this pattern even though the second call may
  // generate 3 instructions.
  uint32_t low = imm & mask;
  CodeBufferCheckScope scope(this,
                             (rn.IsPC() ? 4 : 2) * kMaxInstructionSizeInBytes);
  (this->*instruction)(cond, Best, rd, rn, low);
  (this->*instruction)(cond, Best, rd, rd, high);
  return true;
}
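

// Worked example (illustrative): on T32, Add(r0, r1, 0x12345) has no single
// encoding, but a caller can split it with mask 0xfff into
//   add r0, r1, #0x345
//   add r0, r0, #0x12000
// since 0x12000 is a modified immediate.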


void MacroAssembler::Delegate(InstructionType type,
                              InstructionCondSizeRROp instruction,
                              Condition cond,
                              EncodingSize size,
                              Register rd,
                              Register rn,
                              const Operand& operand) {
  VIXL_ASSERT(
      (type == kAdc) || (type == kAdcs) || (type == kAdd) || (type == kAdds) ||
      (type == kAnd) || (type == kAnds) || (type == kAsr) || (type == kAsrs) ||
      (type == kBic) || (type == kBics) || (type == kEor) || (type == kEors) ||
      (type == kLsl) || (type == kLsls) || (type == kLsr) || (type == kLsrs) ||
      (type == kOrr) || (type == kOrrs) || (type == kRor) || (type == kRors) ||
      (type == kRsb) || (type == kRsbs) || (type == kSbc) || (type == kSbcs) ||
      (type == kSub) || (type == kSubs));

  CONTEXT_SCOPE;
  VIXL_ASSERT(size.IsBest());
  if (IsUsingT32() && operand.IsRegisterShiftedRegister()) {
    InstructionCondRROp shiftop = NULL;
    switch (operand.GetShift().GetType()) {
      case LSL:
        shiftop = &Assembler::lsl;
        break;
      case LSR:
        shiftop = &Assembler::lsr;
        break;
      case ASR:
        shiftop = &Assembler::asr;
        break;
      case RRX:
        // A RegisterShiftedRegister operand cannot have a shift of type RRX.
        VIXL_UNREACHABLE();
        break;
      case ROR:
        shiftop = &Assembler::ror;
        break;
      default:
        VIXL_UNREACHABLE();
    }
    if (shiftop != NULL) {
      UseScratchRegisterScope temps(this);
      Register rm = operand.GetBaseRegister();
      Register rs = operand.GetShiftRegister();
      // Try to use rd as a scratch register. We can do this if it aliases rs
      // or rm (because we read them in the first instruction), but not rn.
      if (!rd.Is(rn)) temps.Include(rd);
      Register scratch = temps.Acquire();
      CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
      (this->*shiftop)(cond, scratch, rm, rs);
      (this->*instruction)(cond, size, rd, rn, scratch);
      return;
    }
  }
  if (operand.IsImmediate()) {
    int32_t imm = operand.GetSignedImmediate();
    if (ImmediateT32::IsImmediateT32(~imm)) {
      if (IsUsingT32()) {
        switch (type) {
          case kOrr:
            orn(cond, rd, rn, ~imm);
            return;
          case kOrrs:
            orns(cond, rd, rn, ~imm);
            return;
          default:
            break;
        }
      }
    }
    if (imm < 0) {
      InstructionCondSizeRROp asmcb = NULL;
      // Add and sub are equivalent using an arithmetic negation:
      //   add rd, rn, #imm <-> sub rd, rn, -#imm
      // Add and sub with carry are equivalent using a bitwise NOT:
      //   adc rd, rn, #imm <-> sbc rd, rn, NOT #imm
      switch (type) {
        case kAdd:
          asmcb = &Assembler::sub;
          imm = -imm;
          break;
        case kAdds:
          asmcb = &Assembler::subs;
          imm = -imm;
          break;
        case kSub:
          asmcb = &Assembler::add;
          imm = -imm;
          break;
        case kSubs:
          asmcb = &Assembler::adds;
          imm = -imm;
          break;
        case kAdc:
          asmcb = &Assembler::sbc;
          imm = ~imm;
          break;
        case kAdcs:
          asmcb = &Assembler::sbcs;
          imm = ~imm;
          break;
        case kSbc:
          asmcb = &Assembler::adc;
          imm = ~imm;
          break;
        case kSbcs:
          asmcb = &Assembler::adcs;
          imm = ~imm;
          break;
        default:
          break;
      }
      if (asmcb != NULL) {
        CodeBufferCheckScope scope(this, 4 * kMaxInstructionSizeInBytes);
        (this->*asmcb)(cond, size, rd, rn, Operand(imm));
        return;
      }
    }

    // When rn is PC, only handle negative offsets. The correct way to handle
    // positive offsets isn't clear; does the user want the offset from the
    // start of the macro, or from the end (to allow a certain amount of
    // space)? When type is Add or Sub, imm is always positive (imm < 0 has
    // just been handled and imm == 0 would have been generated without the
    // need of a delegate). Therefore, only add to PC is forbidden here.
    if ((((type == kAdd) && !rn.IsPC()) || (type == kSub)) &&
        (IsUsingA32() || (!rd.IsPC() && !rn.IsPC()))) {
      VIXL_ASSERT(imm > 0);
      // Try to break the constant into two modified immediates.
      // For T32, also try to break the constant into one imm12 and one
      // modified immediate. Count the trailing zeroes and get the biggest
      // even value.
      int trailing_zeroes = CountTrailingZeros(imm) & ~1u;
      uint32_t mask = ((trailing_zeroes < 4) && IsUsingT32())
                          ? 0xfff
                          : (0xff << trailing_zeroes);
      if (GenerateSplitInstruction(instruction, cond, rd, rn, imm, mask)) {
        return;
      }
      InstructionCondSizeRROp asmcb = NULL;
      switch (type) {
        case kAdd:
          asmcb = &Assembler::sub;
          break;
        case kSub:
          asmcb = &Assembler::add;
          break;
        default:
          VIXL_UNREACHABLE();
      }
      if (GenerateSplitInstruction(asmcb, cond, rd, rn, -imm, mask)) {
        return;
      }
    }

    UseScratchRegisterScope temps(this);
    // Allow using the destination as a scratch register if possible.
    if (!rd.Is(rn)) temps.Include(rd);
    if (rn.IsPC()) {
      // If we're reading the PC, we need to do it in the first instruction,
      // otherwise we'll read the wrong value. We rely on this to handle the
      // long-range PC-relative MemOperands which can result from user-managed
      // literals.

      // Only handle negative offsets. The correct way to handle positive
      // offsets isn't clear; does the user want the offset from the start of
      // the macro, or from the end (to allow a certain amount of space)?
      bool offset_is_negative_or_zero = (imm <= 0);
      switch (type) {
        case kAdd:
        case kAdds:
          offset_is_negative_or_zero = (imm <= 0);
          break;
        case kSub:
        case kSubs:
          offset_is_negative_or_zero = (imm >= 0);
          break;
        case kAdc:
        case kAdcs:
          offset_is_negative_or_zero = (imm < 0);
          break;
        case kSbc:
        case kSbcs:
          offset_is_negative_or_zero = (imm > 0);
          break;
        default:
          break;
      }
      if (offset_is_negative_or_zero) {
        {
          rn = temps.Acquire();
          CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
          mov(cond, rn, pc);
        }
        // Recurse rather than falling through, to try to get the immediate
        // into a single instruction.
        CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
        (this->*instruction)(cond, size, rd, rn, operand);
        return;
      }
    } else {
      Register scratch = temps.Acquire();
      // TODO: The scope length was measured empirically. We should analyse
      // the worst-case size and add targeted tests.
      CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
      mov(cond, scratch, operand.GetImmediate());
      (this->*instruction)(cond, size, rd, rn, scratch);
      return;
    }
  }
  Assembler::Delegate(type, instruction, cond, size, rd, rn, operand);
}


void MacroAssembler::Delegate(InstructionType type,
                              InstructionRL instruction,
                              Register rn,
                              Location* location) {
  VIXL_ASSERT((type == kCbz) || (type == kCbnz));

  CONTEXT_SCOPE;
  CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
  if (IsUsingA32()) {
    if (type == kCbz) {
      VIXL_ABORT_WITH_MSG("Cbz is only available for T32.\n");
    } else {
      VIXL_ABORT_WITH_MSG("Cbnz is only available for T32.\n");
    }
  } else if (rn.IsLow()) {
    switch (type) {
      case kCbnz: {
        Label done;
        cbz(rn, &done);
        b(location);
        Bind(&done);
        return;
      }
      case kCbz: {
        Label done;
        cbnz(rn, &done);
        b(location);
        Bind(&done);
        return;
      }
      default:
        break;
    }
  }
  Assembler::Delegate(type, instruction, rn, location);
}


template <typename T>
static inline bool IsI64BitPattern(T imm) {
  for (T mask = 0xff << ((sizeof(T) - 1) * 8); mask != 0; mask >>= 8) {
    if (((imm & mask) != mask) && ((imm & mask) != 0)) return false;
  }
  return true;
}


template <typename T>
static inline bool IsI8BitPattern(T imm) {
  uint8_t imm8 = imm & 0xff;
  for (unsigned rep = sizeof(T) - 1; rep > 0; rep--) {
    imm >>= 8;
    if ((imm & 0xff) != imm8) return false;
  }
  return true;
}


static inline bool CanBeInverted(uint32_t imm32) {
  uint32_t fill8 = 0;

  if ((imm32 & 0xffffff00) == 0xffffff00) {
    // 11111111 11111111 11111111 abcdefgh
    return true;
  }
  if (((imm32 & 0xff) == 0) || ((imm32 & 0xff) == 0xff)) {
    fill8 = imm32 & 0xff;
    imm32 >>= 8;
    if ((imm32 >> 8) == 0xffff) {
      // 11111111 11111111 abcdefgh 00000000
      // or 11111111 11111111 abcdefgh 11111111
      return true;
    }
    if ((imm32 & 0xff) == fill8) {
      imm32 >>= 8;
      if ((imm32 >> 8) == 0xff) {
        // 11111111 abcdefgh 00000000 00000000
        // or 11111111 abcdefgh 11111111 11111111
        return true;
      }
      if ((fill8 == 0xff) && ((imm32 & 0xff) == 0xff)) {
        // abcdefgh 11111111 11111111 11111111
        return true;
      }
    }
  }
  return false;
}


template <typename RES, typename T>
static inline RES replicate(T imm) {
  VIXL_ASSERT((sizeof(RES) > sizeof(T)) &&
              (((sizeof(RES) / sizeof(T)) * sizeof(T)) == sizeof(RES)));
  RES res = imm;
  for (unsigned i = sizeof(RES) / sizeof(T) - 1; i > 0; i--) {
    res = (res << (sizeof(T) * 8)) | imm;
  }
  return res;
}
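

// For example (illustrative): IsI8BitPattern(0xabababab) is true (every byte
// equal), IsI64BitPattern(0xff0000ff) is true (every byte 0x00 or 0xff), and
// replicate<uint64_t>(0xff0000ffu) yields 0xff0000ffff0000ff, matching the
// immediate forms accepted by vmov.i8 and vmov.i64 in the delegates below.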


void MacroAssembler::Delegate(InstructionType type,
                              InstructionCondDtSSop instruction,
                              Condition cond,
                              DataType dt,
                              SRegister rd,
                              const SOperand& operand) {
  CONTEXT_SCOPE;
  if (type == kVmov) {
    if (operand.IsImmediate() && dt.Is(F32)) {
      const NeonImmediate& neon_imm = operand.GetNeonImmediate();
      if (neon_imm.CanConvert<float>()) {
        // movw ip, imm16
        // movt ip, imm16
        // vmov s0, ip
        UseScratchRegisterScope temps(this);
        Register scratch = temps.Acquire();
        float f = neon_imm.GetImmediate<float>();
        // TODO: The scope length was measured empirically. We should analyse
        // the worst-case size and add targeted tests.
        CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
        mov(cond, scratch, FloatToRawbits(f));
        vmov(cond, rd, scratch);
        return;
      }
    }
  }
  Assembler::Delegate(type, instruction, cond, dt, rd, operand);
}


void MacroAssembler::Delegate(InstructionType type,
                              InstructionCondDtDDop instruction,
                              Condition cond,
                              DataType dt,
                              DRegister rd,
                              const DOperand& operand) {
  CONTEXT_SCOPE;
  if (type == kVmov) {
    if (operand.IsImmediate()) {
      const NeonImmediate& neon_imm = operand.GetNeonImmediate();
      switch (dt.GetValue()) {
        case I32:
          if (neon_imm.CanConvert<uint32_t>()) {
            uint32_t imm = neon_imm.GetImmediate<uint32_t>();
            // vmov.i32 d0, 0xabababab will translate into vmov.i8 d0, 0xab
            if (IsI8BitPattern(imm)) {
              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
              vmov(cond, I8, rd, imm & 0xff);
              return;
            }
            // vmov.i32 d0, 0xff0000ff will translate into
            // vmov.i64 d0, 0xff0000ffff0000ff
            if (IsI64BitPattern(imm)) {
              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
              vmov(cond, I64, rd, replicate<uint64_t>(imm));
              return;
            }
            // vmov.i32 d0, 0xffab0000 will translate into
            // vmvn.i32 d0, 0x0054ffff
            if (cond.Is(al) && CanBeInverted(imm)) {
              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
              vmvn(I32, rd, ~imm);
              return;
            }
          }
          break;
        case I16:
          if (neon_imm.CanConvert<uint16_t>()) {
            uint16_t imm = neon_imm.GetImmediate<uint16_t>();
            // vmov.i16 d0, 0xabab will translate into vmov.i8 d0, 0xab
            if (IsI8BitPattern(imm)) {
              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
              vmov(cond, I8, rd, imm & 0xff);
              return;
            }
          }
          break;
        case I64:
          if (neon_imm.CanConvert<uint64_t>()) {
            uint64_t imm = neon_imm.GetImmediate<uint64_t>();
            // vmov.i64 d0, -1 will translate into vmov.i8 d0, 0xff
            if (IsI8BitPattern(imm)) {
              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
              vmov(cond, I8, rd, imm & 0xff);
              return;
            }
            // mov ip, lo(imm64)
            // vdup d0, ip
            // vdup is preferred to 'vmov d0[0]' as d0[1] does not need to be
            // preserved.
            {
              UseScratchRegisterScope temps(this);
              Register scratch = temps.Acquire();
              {
                // TODO: The scope length was measured empirically. We should
                // analyse the worst-case size and add targeted tests.
                CodeBufferCheckScope scope(this,
                                           2 * kMaxInstructionSizeInBytes);
                mov(cond, scratch, static_cast<uint32_t>(imm & 0xffffffff));
              }
              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
              vdup(cond, Untyped32, rd, scratch);
            }
            // mov ip, hi(imm64)
            // vmov d0[1], ip
            {
              UseScratchRegisterScope temps(this);
              Register scratch = temps.Acquire();
              {
                // TODO: The scope length was measured empirically. We should
                // analyse the worst-case size and add targeted tests.
                CodeBufferCheckScope scope(this,
                                           2 * kMaxInstructionSizeInBytes);
                mov(cond, scratch, static_cast<uint32_t>(imm >> 32));
              }
              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
              vmov(cond, Untyped32, DRegisterLane(rd, 1), scratch);
            }
            return;
          }
          break;
        default:
          break;
      }
      VIXL_ASSERT(!dt.Is(I8));  // I8 cases should have been handled already.
      if ((dt.Is(I16) || dt.Is(I32)) && neon_imm.CanConvert<uint32_t>()) {
        // mov ip, imm32
        // vdup.16 d0, ip
        UseScratchRegisterScope temps(this);
        Register scratch = temps.Acquire();
        {
          CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
          mov(cond, scratch, neon_imm.GetImmediate<uint32_t>());
        }
        DataTypeValue vdup_dt = Untyped32;
        switch (dt.GetValue()) {
          case I16:
            vdup_dt = Untyped16;
            break;
          case I32:
            vdup_dt = Untyped32;
            break;
          default:
            VIXL_UNREACHABLE();
        }
        CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
        vdup(cond, vdup_dt, rd, scratch);
        return;
      }
      if (dt.Is(F32) && neon_imm.CanConvert<float>()) {
        float f = neon_imm.GetImmediate<float>();
        // Punt to vmov.i32
        // TODO: The scope length was guessed based on the double case below.
        // We should analyse the worst-case size and add targeted tests.
        CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
        vmov(cond, I32, rd, FloatToRawbits(f));
        return;
      }
      if (dt.Is(F64) && neon_imm.CanConvert<double>()) {
        // Punt to vmov.i64
        double d = neon_imm.GetImmediate<double>();
        // TODO: The scope length was measured empirically. We should analyse
        // the worst-case size and add targeted tests.
        CodeBufferCheckScope scope(this, 6 * kMaxInstructionSizeInBytes);
        vmov(cond, I64, rd, DoubleToRawbits(d));
        return;
      }
    }
  }
  Assembler::Delegate(type, instruction, cond, dt, rd, operand);
}


void MacroAssembler::Delegate(InstructionType type,
                              InstructionCondDtQQop instruction,
                              Condition cond,
                              DataType dt,
                              QRegister rd,
                              const QOperand& operand) {
  CONTEXT_SCOPE;
  if (type == kVmov) {
    if (operand.IsImmediate()) {
      const NeonImmediate& neon_imm = operand.GetNeonImmediate();
      switch (dt.GetValue()) {
        case I32:
          if (neon_imm.CanConvert<uint32_t>()) {
            uint32_t imm = neon_imm.GetImmediate<uint32_t>();
            // vmov.i32 d0, 0xabababab will translate into vmov.i8 d0, 0xab
            if (IsI8BitPattern(imm)) {
              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
              vmov(cond, I8, rd, imm & 0xff);
              return;
            }
            // vmov.i32 d0, 0xff0000ff will translate into
            // vmov.i64 d0, 0xff0000ffff0000ff
            if (IsI64BitPattern(imm)) {
              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
              vmov(cond, I64, rd, replicate<uint64_t>(imm));
              return;
            }
            // vmov.i32 d0, 0xffab0000 will translate into
            // vmvn.i32 d0, 0x0054ffff
            if (CanBeInverted(imm)) {
              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
              vmvn(cond, I32, rd, ~imm);
              return;
            }
          }
          break;
        case I16:
          if (neon_imm.CanConvert<uint16_t>()) {
            uint16_t imm = neon_imm.GetImmediate<uint16_t>();
            // vmov.i16 d0, 0xabab will translate into vmov.i8 d0, 0xab
            if (IsI8BitPattern(imm)) {
              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
              vmov(cond, I8, rd, imm & 0xff);
              return;
            }
          }
          break;
        case I64:
          if (neon_imm.CanConvert<uint64_t>()) {
            uint64_t imm = neon_imm.GetImmediate<uint64_t>();
            // vmov.i64 d0, -1 will translate into vmov.i8 d0, 0xff
            if (IsI8BitPattern(imm)) {
              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
              vmov(cond, I8, rd, imm & 0xff);
              return;
            }
            // mov ip, lo(imm64)
            // vdup q0, ip
            // vdup is preferred to 'vmov d0[0]' as d0[1-3] don't need to be
            // preserved.
            {
              UseScratchRegisterScope temps(this);
              Register scratch = temps.Acquire();
              {
                CodeBufferCheckScope scope(this,
                                           2 * kMaxInstructionSizeInBytes);
                mov(cond, scratch, static_cast<uint32_t>(imm & 0xffffffff));
              }
              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
              vdup(cond, Untyped32, rd, scratch);
            }
            // mov ip, hi(imm64)
            // vmov.i32 d0[1], ip
            // vmov d1, d0
            {
              UseScratchRegisterScope temps(this);
              Register scratch = temps.Acquire();
              {
                CodeBufferCheckScope scope(this,
                                           2 * kMaxInstructionSizeInBytes);
                mov(cond, scratch, static_cast<uint32_t>(imm >> 32));
              }
              {
                CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
                vmov(cond,
                     Untyped32,
                     DRegisterLane(rd.GetLowDRegister(), 1),
                     scratch);
              }
              CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
              vmov(cond, F64, rd.GetHighDRegister(), rd.GetLowDRegister());
            }
            return;
          }
          break;
        default:
          break;
      }
      VIXL_ASSERT(!dt.Is(I8));  // I8 cases should have been handled already.
      if ((dt.Is(I16) || dt.Is(I32)) && neon_imm.CanConvert<uint32_t>()) {
        // mov ip, imm32
        // vdup.16 d0, ip
        UseScratchRegisterScope temps(this);
        Register scratch = temps.Acquire();
        {
          CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
          mov(cond, scratch, neon_imm.GetImmediate<uint32_t>());
        }
        DataTypeValue vdup_dt = Untyped32;
        switch (dt.GetValue()) {
          case I16:
            vdup_dt = Untyped16;
            break;
          case I32:
            vdup_dt = Untyped32;
            break;
          default:
            VIXL_UNREACHABLE();
        }
        CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
        vdup(cond, vdup_dt, rd, scratch);
        return;
      }
      if (dt.Is(F32) && neon_imm.CanConvert<float>()) {
        // Punt to vmov.i32
        float f = neon_imm.GetImmediate<float>();
        CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
        vmov(cond, I32, rd, FloatToRawbits(f));
        return;
      }
      if (dt.Is(F64) && neon_imm.CanConvert<double>()) {
        // Use vmov to create the double in the low D register, then duplicate
        // it into the high D register.
        double d = neon_imm.GetImmediate<double>();
        CodeBufferCheckScope scope(this, 7 * kMaxInstructionSizeInBytes);
        vmov(cond, F64, rd.GetLowDRegister(), d);
        vmov(cond, F64, rd.GetHighDRegister(), rd.GetLowDRegister());
        return;
      }
    }
  }
  Assembler::Delegate(type, instruction, cond, dt, rd, operand);
}


void MacroAssembler::Delegate(InstructionType type,
                              InstructionCondRL instruction,
                              Condition cond,
                              Register rt,
                              Location* location) {
  VIXL_ASSERT((type == kLdrb) || (type == kLdrh) || (type == kLdrsb) ||
              (type == kLdrsh));

  CONTEXT_SCOPE;

  if (location->IsBound()) {
    CodeBufferCheckScope scope(this, 5 * kMaxInstructionSizeInBytes);
    UseScratchRegisterScope temps(this);
    temps.Include(rt);
    Register scratch = temps.Acquire();
    uint32_t mask = GetOffsetMask(type, Offset);
    switch (type) {
      case kLdrb:
        ldrb(rt, MemOperandComputationHelper(cond, scratch, location, mask));
        return;
      case kLdrh:
        ldrh(rt, MemOperandComputationHelper(cond, scratch, location, mask));
        return;
      case kLdrsb:
        ldrsb(rt, MemOperandComputationHelper(cond, scratch, location, mask));
        return;
      case kLdrsh:
        ldrsh(rt, MemOperandComputationHelper(cond, scratch, location, mask));
        return;
      default:
        VIXL_UNREACHABLE();
    }
    return;
  }

  Assembler::Delegate(type, instruction, cond, rt, location);
}


void MacroAssembler::Delegate(InstructionType type,
                              InstructionCondRRL instruction,
                              Condition cond,
                              Register rt,
                              Register rt2,
                              Location* location) {
  VIXL_ASSERT(type == kLdrd);

  CONTEXT_SCOPE;

  if (location->IsBound()) {
    CodeBufferCheckScope scope(this, 6 * kMaxInstructionSizeInBytes);
    UseScratchRegisterScope temps(this);
    temps.Include(rt, rt2);
    Register scratch = temps.Acquire();
    uint32_t mask = GetOffsetMask(type, Offset);
    ldrd(rt, rt2, MemOperandComputationHelper(cond, scratch, location, mask));
    return;
  }

  Assembler::Delegate(type, instruction, cond, rt, rt2, location);
}


void MacroAssembler::Delegate(InstructionType type,
                              InstructionCondSizeRMop instruction,
                              Condition cond,
                              EncodingSize size,
                              Register rd,
                              const MemOperand& operand) {
  CONTEXT_SCOPE;
  VIXL_ASSERT(size.IsBest());
  VIXL_ASSERT((type == kLdr) || (type == kLdrb) || (type == kLdrh) ||
              (type == kLdrsb) || (type == kLdrsh) || (type == kStr) ||
              (type == kStrb) || (type == kStrh));
  if (operand.IsImmediate()) {
    const Register& rn = operand.GetBaseRegister();
    AddrMode addrmode = operand.GetAddrMode();
    int32_t offset = operand.GetOffsetImmediate();
    uint32_t extra_offset_mask = GetOffsetMask(type, addrmode);
    // Try to maximize the offset used by the MemOperand (load_store_offset).
    // Add the part which can't be used by the MemOperand (add_offset).
    uint32_t load_store_offset = offset & extra_offset_mask;
    uint32_t add_offset = offset & ~extra_offset_mask;
    if ((add_offset != 0) &&
        (IsModifiedImmediate(offset) || IsModifiedImmediate(-offset))) {
      load_store_offset = 0;
      add_offset = offset;
    }
    switch (addrmode) {
      case PreIndex:
        // Avoid the unpredictable case 'str r0, [r0, imm]!'
        if (!rn.Is(rd)) {
          // Pre-Indexed case:
          // ldr r0, [r1, 12345]! will translate into
          //   add r1, r1, 12345
          //   ldr r0, [r1]
          {
            CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
            add(cond, rn, rn, add_offset);
          }
          {
            CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
            (this->*instruction)(cond,
                                 size,
                                 rd,
                                 MemOperand(rn, load_store_offset, PreIndex));
          }
          return;
        }
        break;
      case Offset: {
        UseScratchRegisterScope temps(this);
        // Allow using the destination as a scratch register if possible.
        if ((type != kStr) && (type != kStrb) && (type != kStrh) &&
            !rd.Is(rn)) {
          temps.Include(rd);
        }
        Register scratch = temps.Acquire();
        // Offset case:
        // ldr r0, [r1, 12345] will translate into
        //   add r0, r1, 12345
        //   ldr r0, [r0]
        {
          CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
          add(cond, scratch, rn, add_offset);
        }
        {
          CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
          (this->*instruction)(cond,
                               size,
                               rd,
                               MemOperand(scratch, load_store_offset));
        }
        return;
      }
      case PostIndex:
        // Avoid the unpredictable case 'ldr r0, [r0], imm'
        if (!rn.Is(rd)) {
          // Post-indexed case:
          // ldr r0, [r1], imm32 will translate into
          //   ldr r0, [r1]
          //   movw ip, imm32 & 0xffff
          //   movt ip, imm32 >> 16
          //   add r1, r1, ip
          {
            CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
            (this->*instruction)(cond,
                                 size,
                                 rd,
                                 MemOperand(rn, load_store_offset, PostIndex));
          }
          {
            CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
            add(cond, rn, rn, add_offset);
          }
          return;
        }
        break;
    }
  } else if (operand.IsPlainRegister()) {
    const Register& rn = operand.GetBaseRegister();
    AddrMode addrmode = operand.GetAddrMode();
    const Register& rm = operand.GetOffsetRegister();
    if (rm.IsPC()) {
      VIXL_ABORT_WITH_MSG(
          "The MacroAssembler does not convert loads and stores with a PC "
          "offset register.\n");
    }
    if (rn.IsPC()) {
      if (addrmode == Offset) {
        if (IsUsingT32()) {
          VIXL_ABORT_WITH_MSG(
              "The MacroAssembler does not convert loads and stores with a "
              "PC base register for T32.\n");
        }
      } else {
        VIXL_ABORT_WITH_MSG(
            "The MacroAssembler does not convert loads and stores with a PC "
            "base register in pre-index or post-index mode.\n");
      }
    }
    switch (addrmode) {
      case PreIndex:
        // Avoid the unpredictable case 'str r0, [r0, imm]!'
        if (!rn.Is(rd)) {
          // Pre-Indexed case:
          // ldr r0, [r1, r2]! will translate into
          //   add r1, r1, r2
          //   ldr r0, [r1]
          {
            CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
            if (operand.GetSign().IsPlus()) {
              add(cond, rn, rn, rm);
            } else {
              sub(cond, rn, rn, rm);
            }
          }
          {
            CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
            (this->*instruction)(cond, size, rd, MemOperand(rn, Offset));
          }
          return;
        }
        break;
      case Offset: {
        UseScratchRegisterScope temps(this);
        // Allow using the destination as a scratch register if this is not a
        // store.
        // Avoid using PC as a temporary as this has side-effects.
        if ((type != kStr) && (type != kStrb) && (type != kStrh) &&
            !rd.IsPC()) {
          temps.Include(rd);
        }
        Register scratch = temps.Acquire();
        // Offset case:
        // ldr r0, [r1, r2] will translate into
        //   add r0, r1, r2
        //   ldr r0, [r0]
        {
          CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
          if (operand.GetSign().IsPlus()) {
            add(cond, scratch, rn, rm);
          } else {
            sub(cond, scratch, rn, rm);
          }
        }
        {
          CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
          (this->*instruction)(cond, size, rd, MemOperand(scratch, Offset));
        }
        return;
      }
      case PostIndex:
        // Avoid the unpredictable case 'ldr r0, [r0], imm'
        if (!rn.Is(rd)) {
          // Post-indexed case:
          // ldr r0, [r1], r2 will translate into
          //   ldr r0, [r1]
          //   add r1, r1, r2
          {
            CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
            (this->*instruction)(cond, size, rd, MemOperand(rn, Offset));
          }
          {
            CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
            if (operand.GetSign().IsPlus()) {
              add(cond, rn, rn, rm);
            } else {
              sub(cond, rn, rn, rm);
            }
          }
          return;
        }
        break;
    }
  }
  Assembler::Delegate(type, instruction, cond, size, rd, operand);
}
void MacroAssembler::Delegate(InstructionType type,
                              InstructionCondRRMop instruction,
                              Condition cond,
                              Register rt,
                              Register rt2,
                              const MemOperand& operand) {
  if ((type == kLdaexd) || (type == kLdrexd) || (type == kStlex) ||
      (type == kStlexb) || (type == kStlexh) || (type == kStrex) ||
      (type == kStrexb) || (type == kStrexh)) {
    UnimplementedDelegate(type);
    return;
  }

  VIXL_ASSERT((type == kLdrd) || (type == kStrd));

  CONTEXT_SCOPE;

  // TODO: Should we allow these cases?
  if (IsUsingA32()) {
    // The first register needs to be even.
    if ((rt.GetCode() & 1) != 0) {
      UnimplementedDelegate(type);
      return;
    }
    // Registers need to be adjacent.
    if (((rt.GetCode() + 1) % kNumberOfRegisters) != rt2.GetCode()) {
      UnimplementedDelegate(type);
      return;
    }
    // LDRD lr, pc, [...] is not allowed.
    if (rt.Is(lr)) {
      UnimplementedDelegate(type);
      return;
    }
  }

  if (operand.IsImmediate()) {
    const Register& rn = operand.GetBaseRegister();
    AddrMode addrmode = operand.GetAddrMode();
    int32_t offset = operand.GetOffsetImmediate();
    uint32_t extra_offset_mask = GetOffsetMask(type, addrmode);
    // Try to maximize the offset used by the MemOperand (load_store_offset).
    // Add the part which can't be used by the MemOperand (add_offset).
    uint32_t load_store_offset = offset & extra_offset_mask;
    uint32_t add_offset = offset & ~extra_offset_mask;
    if ((add_offset != 0) &&
        (IsModifiedImmediate(offset) || IsModifiedImmediate(-offset))) {
      load_store_offset = 0;
      add_offset = offset;
    }
    switch (addrmode) {
      case PreIndex: {
        // Allow using the destinations as scratch registers if possible.
        UseScratchRegisterScope temps(this);
        if (type == kLdrd) {
          if (!rt.Is(rn)) temps.Include(rt);
          if (!rt2.Is(rn)) temps.Include(rt2);
        }

        // Pre-Indexed case:
        // ldrd r0, r1, [r2, 12345]! will translate into
        //   add r2, 12345
        //   ldrd r0, r1, [r2]
        {
          CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
          add(cond, rn, rn, add_offset);
        }
        {
          CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
          (this->*instruction)(cond,
                               rt,
                               rt2,
                               MemOperand(rn, load_store_offset, PreIndex));
        }
        return;
      }
      case Offset: {
        UseScratchRegisterScope temps(this);
        // Allow using the destinations as scratch registers if possible.
        if (type == kLdrd) {
          if (!rt.Is(rn)) temps.Include(rt);
          if (!rt2.Is(rn)) temps.Include(rt2);
        }
        Register scratch = temps.Acquire();
        // Offset case:
        // ldrd r0, r1, [r2, 12345] will translate into
        //   add r0, r2, 12345
        //   ldrd r0, r1, [r0]
        {
          CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
          add(cond, scratch, rn, add_offset);
        }
        {
          CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
          (this->*instruction)(cond,
                               rt,
                               rt2,
                               MemOperand(scratch, load_store_offset));
        }
        return;
      }
      case PostIndex:
        // Avoid the unpredictable case 'ldrd r0, r1, [r0], imm'
        if (!rn.Is(rt) && !rn.Is(rt2)) {
          // Post-indexed case:
          // ldrd r0, r1, [r2], imm32 will translate into
          //   ldrd r0, r1, [r2]
          //   movw ip, imm32 & 0xffff
          //   movt ip, imm32 >> 16
          //   add r2, ip
          {
            CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
            (this->*instruction)(cond,
                                 rt,
                                 rt2,
                                 MemOperand(rn, load_store_offset, PostIndex));
          }
          {
            CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
            add(cond, rn, rn, add_offset);
          }
          return;
        }
        break;
    }
  }
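  // Register-offset forms. These are typically reached on T32, where LDRD
  // and STRD have no register-offset encoding, so the offset register is
  // folded into the base (or a scratch register) with an explicit add or
  // sub first.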
  if (operand.IsPlainRegister()) {
    const Register& rn = operand.GetBaseRegister();
    const Register& rm = operand.GetOffsetRegister();
    AddrMode addrmode = operand.GetAddrMode();
    switch (addrmode) {
      case PreIndex:
        // ldrd r0, r1, [r2, r3]! will translate into
        //   add r2, r3
        //   ldrd r0, r1, [r2]
        {
          CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
          if (operand.GetSign().IsPlus()) {
            add(cond, rn, rn, rm);
          } else {
            sub(cond, rn, rn, rm);
          }
        }
        {
          CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
          (this->*instruction)(cond, rt, rt2, MemOperand(rn, Offset));
        }
        return;
      case PostIndex:
        // ldrd r0, r1, [r2], r3 will translate into
        //   ldrd r0, r1, [r2]
        //   add r2, r3
        {
          CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
          (this->*instruction)(cond, rt, rt2, MemOperand(rn, Offset));
        }
        {
          CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
          if (operand.GetSign().IsPlus()) {
            add(cond, rn, rn, rm);
          } else {
            sub(cond, rn, rn, rm);
          }
        }
        return;
      case Offset: {
        UseScratchRegisterScope temps(this);
        // Allow using the destinations as scratch registers if possible.
        if (type == kLdrd) {
          if (!rt.Is(rn)) temps.Include(rt);
          if (!rt2.Is(rn)) temps.Include(rt2);
        }
        Register scratch = temps.Acquire();
        // Offset case:
        // ldrd r0, r1, [r2, r3] will translate into
        //   add r0, r2, r3
        //   ldrd r0, r1, [r0]
        {
          CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
          if (operand.GetSign().IsPlus()) {
            add(cond, scratch, rn, rm);
          } else {
            sub(cond, scratch, rn, rm);
          }
        }
        {
          CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
          (this->*instruction)(cond, rt, rt2, MemOperand(scratch, Offset));
        }
        return;
      }
    }
  }
  Assembler::Delegate(type, instruction, cond, rt, rt2, operand);
}

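// The next two delegates apply the same rewriting to VFP loads and stores
// (VLDR/VSTR). Their immediate offset must be a multiple of 4 in the range
// [-1020, 1020], so anything else is materialised into the base register or
// a scratch register first.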
void MacroAssembler::Delegate(InstructionType type,
                              InstructionCondDtSMop instruction,
                              Condition cond,
                              DataType dt,
                              SRegister rd,
                              const MemOperand& operand) {
  CONTEXT_SCOPE;
  if (operand.IsImmediate()) {
    const Register& rn = operand.GetBaseRegister();
    AddrMode addrmode = operand.GetAddrMode();
    int32_t offset = operand.GetOffsetImmediate();
    VIXL_ASSERT(((offset > 0) && operand.GetSign().IsPlus()) ||
                ((offset < 0) && operand.GetSign().IsMinus()) ||
                (offset == 0));
    if (rn.IsPC()) {
      VIXL_ABORT_WITH_MSG(
          "The MacroAssembler does not convert vldr or vstr with a PC base "
          "register.\n");
    }
    switch (addrmode) {
      case PreIndex:
        // Pre-Indexed case:
        // vldr.32 s0, [r1, 12345]! will translate into
        //   add r1, 12345
        //   vldr.32 s0, [r1]
        if (offset != 0) {
          CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
          add(cond, rn, rn, offset);
        }
        {
          CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
          (this->*instruction)(cond, dt, rd, MemOperand(rn, Offset));
        }
        return;
      case Offset: {
        UseScratchRegisterScope temps(this);
        Register scratch = temps.Acquire();
        // Offset case:
        // vldr.32 s0, [r1, 12345] will translate into
        //   add ip, r1, 12345
        //   vldr.32 s0, [ip]
        {
          VIXL_ASSERT(offset != 0);
          CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
          add(cond, scratch, rn, offset);
        }
        {
          CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
          (this->*instruction)(cond, dt, rd, MemOperand(scratch, Offset));
        }
        return;
      }
      case PostIndex:
        // Post-indexed case:
        // vldr.32 s0, [r1], imm32 will translate into
        //   vldr.32 s0, [r1]
        //   movw ip, imm32 & 0xffff
        //   movt ip, imm32 >> 16
        //   add r1, ip
        {
          CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
          (this->*instruction)(cond, dt, rd, MemOperand(rn, Offset));
        }
        if (offset != 0) {
          CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
          add(cond, rn, rn, offset);
        }
        return;
    }
  }
  Assembler::Delegate(type, instruction, cond, dt, rd, operand);
}

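// The D-register delegate below mirrors the S-register one above; only the
// data type and register class differ. As a hypothetical illustration (not
// code from this file), assuming a scratch register is available,
//   masm.Vldr(d0, MemOperand(r1, 0x10000));
// would expand to roughly
//   add ip, r1, #0x10000
//   vldr d0, [ip]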
void MacroAssembler::Delegate(InstructionType type,
                              InstructionCondDtDMop instruction,
                              Condition cond,
                              DataType dt,
                              DRegister rd,
                              const MemOperand& operand) {
  CONTEXT_SCOPE;
  if (operand.IsImmediate()) {
    const Register& rn = operand.GetBaseRegister();
    AddrMode addrmode = operand.GetAddrMode();
    int32_t offset = operand.GetOffsetImmediate();
    VIXL_ASSERT(((offset > 0) && operand.GetSign().IsPlus()) ||
                ((offset < 0) && operand.GetSign().IsMinus()) ||
                (offset == 0));
    if (rn.IsPC()) {
      VIXL_ABORT_WITH_MSG(
          "The MacroAssembler does not convert vldr or vstr with a PC base "
          "register.\n");
    }
    switch (addrmode) {
      case PreIndex:
        // Pre-Indexed case:
        // vldr.64 d0, [r1, 12345]! will translate into
        //   add r1, 12345
        //   vldr.64 d0, [r1]
        if (offset != 0) {
          CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
          add(cond, rn, rn, offset);
        }
        {
          CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
          (this->*instruction)(cond, dt, rd, MemOperand(rn, Offset));
        }
        return;
      case Offset: {
        UseScratchRegisterScope temps(this);
        Register scratch = temps.Acquire();
        // Offset case:
        // vldr.64 d0, [r1, 12345] will translate into
        //   add ip, r1, 12345
        //   vldr.64 d0, [ip]
        {
          VIXL_ASSERT(offset != 0);
          CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
          add(cond, scratch, rn, offset);
        }
        {
          CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
          (this->*instruction)(cond, dt, rd, MemOperand(scratch, Offset));
        }
        return;
      }
      case PostIndex:
        // Post-indexed case:
        // vldr.64 d0, [r1], imm32 will translate into
        //   vldr.64 d0, [r1]
        //   movw ip, imm32 & 0xffff
        //   movt ip, imm32 >> 16
        //   add r1, ip
        {
          CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
          (this->*instruction)(cond, dt, rd, MemOperand(rn, Offset));
        }
        if (offset != 0) {
          CodeBufferCheckScope scope(this, 3 * kMaxInstructionSizeInBytes);
          add(cond, rn, rn, offset);
        }
        return;
    }
  }
  Assembler::Delegate(type, instruction, cond, dt, rd, operand);
}


void MacroAssembler::Delegate(InstructionType type,
                              InstructionCondMsrOp instruction,
                              Condition cond,
                              MaskedSpecialRegister spec_reg,
                              const Operand& operand) {
  USE(type);
  VIXL_ASSERT(type == kMsr);
  if (operand.IsImmediate()) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    {
      CodeBufferCheckScope scope(this, 2 * kMaxInstructionSizeInBytes);
      mov(cond, scratch, operand);
    }
    CodeBufferCheckScope scope(this, kMaxInstructionSizeInBytes);
    msr(cond, spec_reg, scratch);
    return;
  }
  Assembler::Delegate(type, instruction, cond, spec_reg, operand);
}


void MacroAssembler::Delegate(InstructionType type,
                              InstructionCondDtDL instruction,
                              Condition cond,
                              DataType dt,
                              DRegister rd,
                              Location* location) {
  VIXL_ASSERT(type == kVldr);

  CONTEXT_SCOPE;

  if (location->IsBound()) {
    CodeBufferCheckScope scope(this, 5 * kMaxInstructionSizeInBytes);
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    uint32_t mask = GetOffsetMask(type, Offset);
    vldr(dt, rd, MemOperandComputationHelper(cond, scratch, location, mask));
    return;
  }

  Assembler::Delegate(type, instruction, cond, dt, rd, location);
}


void MacroAssembler::Delegate(InstructionType type,
                              InstructionCondDtSL instruction,
                              Condition cond,
                              DataType dt,
                              SRegister rd,
                              Location* location) {
  VIXL_ASSERT(type == kVldr);

  CONTEXT_SCOPE;

  if (location->IsBound()) {
    CodeBufferCheckScope scope(this, 5 * kMaxInstructionSizeInBytes);
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    uint32_t mask = GetOffsetMask(type, Offset);
    vldr(dt, rd, MemOperandComputationHelper(cond, scratch, location, mask));
    return;
  }

  Assembler::Delegate(type, instruction, cond, dt, rd, location);
}


#undef CONTEXT_SCOPE
#undef TOSTRING
#undef STRINGIFY

// Start of generated code.
// End of generated code.

}  // namespace aarch32
}  // namespace vixl