// Source: dep/vixl/src/aarch64/operands-aarch64.cc
// Copyright 2016, VIXL authors1// All rights reserved.2//3// Redistribution and use in source and binary forms, with or without4// modification, are permitted provided that the following conditions are met:5//6// * Redistributions of source code must retain the above copyright notice,7// this list of conditions and the following disclaimer.8// * Redistributions in binary form must reproduce the above copyright notice,9// this list of conditions and the following disclaimer in the documentation10// and/or other materials provided with the distribution.11// * Neither the name of ARM Limited nor the names of its contributors may be12// used to endorse or promote products derived from this software without13// specific prior written permission.14//15// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND16// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED17// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE18// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE19// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL20// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR21// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER22// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,23// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE24// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.2526#include "operands-aarch64.h"2728namespace vixl {29namespace aarch64 {3031// CPURegList utilities.32CPURegister CPURegList::PopLowestIndex(RegList mask) {33RegList list = list_ & mask;34if (list == 0) return NoCPUReg;35int index = CountTrailingZeros(list);36VIXL_ASSERT(((static_cast<RegList>(1) << index) & list) != 0);37Remove(index);38return CPURegister(index, size_, type_);39}404142CPURegister CPURegList::PopHighestIndex(RegList mask) {43RegList list = list_ & mask;44if (list == 0) return NoCPUReg;45int index = CountLeadingZeros(list);46index = kRegListSizeInBits - 1 - index;47VIXL_ASSERT(((static_cast<RegList>(1) << index) & list) != 0);48Remove(index);49return CPURegister(index, size_, type_);50}515253bool CPURegList::IsValid() const {54if (type_ == CPURegister::kNoRegister) {55// We can't use IsEmpty here because that asserts IsValid().56return list_ == 0;57} else {58bool is_valid = true;59// Try to create a CPURegister for each element in the list.60for (int i = 0; i < kRegListSizeInBits; i++) {61if (((list_ >> i) & 1) != 0) {62is_valid &= CPURegister(i, size_, type_).IsValid();63}64}65return is_valid;66}67}686970void CPURegList::RemoveCalleeSaved() {71if (GetType() == CPURegister::kRegister) {72Remove(GetCalleeSaved(GetRegisterSizeInBits()));73} else if (GetType() == CPURegister::kVRegister) {74Remove(GetCalleeSavedV(GetRegisterSizeInBits()));75} else {76VIXL_ASSERT(GetType() == 
CPURegister::kNoRegister);77VIXL_ASSERT(IsEmpty());78// The list must already be empty, so do nothing.79}80}818283CPURegList CPURegList::Union(const CPURegList& list_1,84const CPURegList& list_2,85const CPURegList& list_3) {86return Union(list_1, Union(list_2, list_3));87}888990CPURegList CPURegList::Union(const CPURegList& list_1,91const CPURegList& list_2,92const CPURegList& list_3,93const CPURegList& list_4) {94return Union(Union(list_1, list_2), Union(list_3, list_4));95}969798CPURegList CPURegList::Intersection(const CPURegList& list_1,99const CPURegList& list_2,100const CPURegList& list_3) {101return Intersection(list_1, Intersection(list_2, list_3));102}103104105CPURegList CPURegList::Intersection(const CPURegList& list_1,106const CPURegList& list_2,107const CPURegList& list_3,108const CPURegList& list_4) {109return Intersection(Intersection(list_1, list_2),110Intersection(list_3, list_4));111}112113114CPURegList CPURegList::GetCalleeSaved(unsigned size) {115return CPURegList(CPURegister::kRegister, size, 19, 29);116}117118119CPURegList CPURegList::GetCalleeSavedV(unsigned size) {120return CPURegList(CPURegister::kVRegister, size, 8, 15);121}122123124CPURegList CPURegList::GetCallerSaved(unsigned size) {125// Registers x0-x18 and lr (x30) are caller-saved.126CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);127// Do not use lr directly to avoid initialisation order fiasco bugs for users.128list.Combine(Register(30, kXRegSize));129return list;130}131132133CPURegList CPURegList::GetCallerSavedV(unsigned size) {134// Registers d0-d7 and d16-d31 are caller-saved.135CPURegList list = CPURegList(CPURegister::kVRegister, size, 0, 7);136list.Combine(CPURegList(CPURegister::kVRegister, size, 16, 31));137return list;138}139140141const CPURegList kCalleeSaved = CPURegList::GetCalleeSaved();142const CPURegList kCalleeSavedV = CPURegList::GetCalleeSavedV();143const CPURegList kCallerSaved = CPURegList::GetCallerSaved();144const CPURegList kCallerSavedV = 
CPURegList::GetCallerSavedV();145146// Operand.147Operand::Operand(int64_t immediate)148: immediate_(immediate),149reg_(NoReg),150shift_(NO_SHIFT),151extend_(NO_EXTEND),152shift_amount_(0) {}153154Operand::Operand(IntegerOperand immediate)155: immediate_(immediate.AsIntN(64)),156reg_(NoReg),157shift_(NO_SHIFT),158extend_(NO_EXTEND),159shift_amount_(0) {}160161Operand::Operand(Register reg, Shift shift, unsigned shift_amount)162: reg_(reg),163shift_(shift),164extend_(NO_EXTEND),165shift_amount_(shift_amount) {166VIXL_ASSERT(shift != MSL);167VIXL_ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize));168VIXL_ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize));169VIXL_ASSERT(!reg.IsSP());170}171172173Operand::Operand(Register reg, Extend extend, unsigned shift_amount)174: reg_(reg),175shift_(NO_SHIFT),176extend_(extend),177shift_amount_(shift_amount) {178VIXL_ASSERT(reg.IsValid());179VIXL_ASSERT(shift_amount <= 4);180VIXL_ASSERT(!reg.IsSP());181182// Extend modes SXTX and UXTX require a 64-bit register.183VIXL_ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));184}185186187bool Operand::IsImmediate() const { return reg_.Is(NoReg); }188189190bool Operand::IsPlainRegister() const {191return reg_.IsValid() &&192(((shift_ == NO_SHIFT) && (extend_ == NO_EXTEND)) ||193// No-op shifts.194((shift_ != NO_SHIFT) && (shift_amount_ == 0)) ||195// No-op extend operations.196// We can't include [US]XTW here without knowing more about the197// context; they are only no-ops for 32-bit operations.198//199// For example, this operand could be replaced with w1:200// __ Add(w0, w0, Operand(w1, UXTW));201// However, no plain register can replace it in this context:202// __ Add(x0, x0, Operand(w1, UXTW));203(((extend_ == UXTX) || (extend_ == SXTX)) && (shift_amount_ == 0)));204}205206207bool Operand::IsShiftedRegister() const {208return reg_.IsValid() && (shift_ != NO_SHIFT);209}210211212bool Operand::IsExtendedRegister() const {213return reg_.IsValid() && (extend_ != 
NO_EXTEND);214}215216217bool Operand::IsZero() const {218if (IsImmediate()) {219return GetImmediate() == 0;220} else {221return GetRegister().IsZero();222}223}224225226Operand Operand::ToExtendedRegister() const {227VIXL_ASSERT(IsShiftedRegister());228VIXL_ASSERT((shift_ == LSL) && (shift_amount_ <= 4));229return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);230}231232233// MemOperand234MemOperand::MemOperand()235: base_(NoReg),236regoffset_(NoReg),237offset_(0),238addrmode_(Offset),239shift_(NO_SHIFT),240extend_(NO_EXTEND) {}241242243MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)244: base_(base),245regoffset_(NoReg),246offset_(offset),247addrmode_(addrmode),248shift_(NO_SHIFT),249extend_(NO_EXTEND),250shift_amount_(0) {251VIXL_ASSERT(base.Is64Bits() && !base.IsZero());252}253254255MemOperand::MemOperand(Register base,256Register regoffset,257Extend extend,258unsigned shift_amount)259: base_(base),260regoffset_(regoffset),261offset_(0),262addrmode_(Offset),263shift_(NO_SHIFT),264extend_(extend),265shift_amount_(shift_amount) {266VIXL_ASSERT(base.Is64Bits() && !base.IsZero());267VIXL_ASSERT(!regoffset.IsSP());268VIXL_ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));269270// SXTX extend mode requires a 64-bit offset register.271VIXL_ASSERT(regoffset.Is64Bits() || (extend != SXTX));272}273274275MemOperand::MemOperand(Register base,276Register regoffset,277Shift shift,278unsigned shift_amount)279: base_(base),280regoffset_(regoffset),281offset_(0),282addrmode_(Offset),283shift_(shift),284extend_(NO_EXTEND),285shift_amount_(shift_amount) {286VIXL_ASSERT(base.Is64Bits() && !base.IsZero());287VIXL_ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());288VIXL_ASSERT(shift == LSL);289}290291292MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)293: base_(base),294regoffset_(NoReg),295addrmode_(addrmode),296shift_(NO_SHIFT),297extend_(NO_EXTEND),298shift_amount_(0) {299VIXL_ASSERT(base.Is64Bits() 
&& !base.IsZero());300301if (offset.IsImmediate()) {302offset_ = offset.GetImmediate();303} else if (offset.IsShiftedRegister()) {304VIXL_ASSERT((addrmode == Offset) || (addrmode == PostIndex));305306regoffset_ = offset.GetRegister();307shift_ = offset.GetShift();308shift_amount_ = offset.GetShiftAmount();309310extend_ = NO_EXTEND;311offset_ = 0;312313// These assertions match those in the shifted-register constructor.314VIXL_ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());315VIXL_ASSERT(shift_ == LSL);316} else {317VIXL_ASSERT(offset.IsExtendedRegister());318VIXL_ASSERT(addrmode == Offset);319320regoffset_ = offset.GetRegister();321extend_ = offset.GetExtend();322shift_amount_ = offset.GetShiftAmount();323324shift_ = NO_SHIFT;325offset_ = 0;326327// These assertions match those in the extended-register constructor.328VIXL_ASSERT(!regoffset_.IsSP());329VIXL_ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));330VIXL_ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));331}332}333334335bool MemOperand::IsPlainRegister() const {336return IsImmediateOffset() && (GetOffset() == 0);337}338339340bool MemOperand::IsEquivalentToPlainRegister() const {341if (regoffset_.Is(NoReg)) {342// Immediate offset, pre-index or post-index.343return GetOffset() == 0;344} else if (GetRegisterOffset().IsZero()) {345// Zero register offset, pre-index or post-index.346// We can ignore shift and extend options because they all result in zero.347return true;348}349return false;350}351352353bool MemOperand::IsImmediateOffset() const {354return (addrmode_ == Offset) && regoffset_.Is(NoReg);355}356357358bool MemOperand::IsRegisterOffset() const {359return (addrmode_ == Offset) && !regoffset_.Is(NoReg);360}361362bool MemOperand::IsPreIndex() const { return addrmode_ == PreIndex; }363bool MemOperand::IsPostIndex() const { return addrmode_ == PostIndex; }364365bool MemOperand::IsImmediatePreIndex() const {366return IsPreIndex() && regoffset_.Is(NoReg);367}368369bool 
MemOperand::IsImmediatePostIndex() const {370return IsPostIndex() && regoffset_.Is(NoReg);371}372373void MemOperand::AddOffset(int64_t offset) {374VIXL_ASSERT(IsImmediateOffset());375offset_ += offset;376}377378379bool SVEMemOperand::IsValid() const {380#ifdef VIXL_DEBUG381{382// It should not be possible for an SVEMemOperand to match multiple types.383int count = 0;384if (IsScalarPlusImmediate()) count++;385if (IsScalarPlusScalar()) count++;386if (IsScalarPlusVector()) count++;387if (IsVectorPlusImmediate()) count++;388if (IsVectorPlusScalar()) count++;389if (IsVectorPlusVector()) count++;390VIXL_ASSERT(count <= 1);391}392#endif393394// We can't have a register _and_ an immediate offset.395if ((offset_ != 0) && (!regoffset_.IsNone())) return false;396397if (shift_amount_ != 0) {398// Only shift and extend modifiers can take a shift amount.399switch (mod_) {400case NO_SVE_OFFSET_MODIFIER:401case SVE_MUL_VL:402return false;403case SVE_LSL:404case SVE_UXTW:405case SVE_SXTW:406// Fall through.407break;408}409}410411return IsScalarPlusImmediate() || IsScalarPlusScalar() ||412IsScalarPlusVector() || IsVectorPlusImmediate() ||413IsVectorPlusScalar() || IsVectorPlusVector();414}415416417bool SVEMemOperand::IsEquivalentToScalar() const {418if (IsScalarPlusImmediate()) {419return GetImmediateOffset() == 0;420}421if (IsScalarPlusScalar()) {422// We can ignore the shift because it will still result in zero.423return GetScalarOffset().IsZero();424}425// Forms involving vectors are never equivalent to a single scalar.426return false;427}428429bool SVEMemOperand::IsPlainRegister() const {430if (IsScalarPlusImmediate()) {431return GetImmediateOffset() == 0;432}433return false;434}435436GenericOperand::GenericOperand(const CPURegister& reg)437: cpu_register_(reg), mem_op_size_(0) {438if (reg.IsQ()) {439VIXL_ASSERT(reg.GetSizeInBits() > static_cast<int>(kXRegSize));440// Support for Q registers is not implemented 
yet.441VIXL_UNIMPLEMENTED();442}443}444445446GenericOperand::GenericOperand(const MemOperand& mem_op, size_t mem_op_size)447: cpu_register_(NoReg), mem_op_(mem_op), mem_op_size_(mem_op_size) {448if (mem_op_size_ > kXRegSizeInBytes) {449// We only support generic operands up to the size of X registers.450VIXL_UNIMPLEMENTED();451}452}453454bool GenericOperand::Equals(const GenericOperand& other) const {455if (!IsValid() || !other.IsValid()) {456// Two invalid generic operands are considered equal.457return !IsValid() && !other.IsValid();458}459if (IsCPURegister() && other.IsCPURegister()) {460return GetCPURegister().Is(other.GetCPURegister());461} else if (IsMemOperand() && other.IsMemOperand()) {462return GetMemOperand().Equals(other.GetMemOperand()) &&463(GetMemOperandSizeInBytes() == other.GetMemOperandSizeInBytes());464}465return false;466}467} // namespace aarch64468} // namespace vixl469470471