GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
//===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements AArch64 TargetInfo objects.
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/TargetParser/AArch64TargetParser.h"
#include "llvm/TargetParser/ARMTargetParserCommon.h"
#include <optional>

using namespace clang;
using namespace clang::targets;

static constexpr Builtin::Info BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsNEON.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsSVE.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsSME.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
  {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, LANG},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
  {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::HEADER, LANGS},
#include "clang/Basic/BuiltinsAArch64.def"
};

void AArch64TargetInfo::setArchFeatures() {
  if (*ArchInfo == llvm::AArch64::ARMV8R) {
    HasDotProd = true;
    HasDIT = true;
    HasFlagM = true;
    HasRCPC = true;
    FPU |= NeonMode;
    HasCCPP = true;
    HasCRC = true;
    HasLSE = true;
    HasRDM = true;
  } else if (ArchInfo->Version.getMajor() == 8) {
    if (ArchInfo->Version.getMinor() >= 7u) {
      HasWFxT = true;
    }
    if (ArchInfo->Version.getMinor() >= 6u) {
      HasBFloat16 = true;
      HasMatMul = true;
    }
    if (ArchInfo->Version.getMinor() >= 5u) {
      HasAlternativeNZCV = true;
      HasFRInt3264 = true;
      HasSSBS = true;
      HasSB = true;
      HasPredRes = true;
      HasBTI = true;
    }
    if (ArchInfo->Version.getMinor() >= 4u) {
      HasDotProd = true;
      HasDIT = true;
      HasFlagM = true;
    }
    if (ArchInfo->Version.getMinor() >= 3u) {
      HasRCPC = true;
      FPU |= NeonMode;
    }
    if (ArchInfo->Version.getMinor() >= 2u) {
      HasCCPP = true;
    }
    if (ArchInfo->Version.getMinor() >= 1u) {
      HasCRC = true;
      HasLSE = true;
      HasRDM = true;
    }
  } else if (ArchInfo->Version.getMajor() == 9) {
    if (ArchInfo->Version.getMinor() >= 2u) {
      HasWFxT = true;
    }
    if (ArchInfo->Version.getMinor() >= 1u) {
      HasBFloat16 = true;
      HasMatMul = true;
    }
    FPU |= SveMode;
    HasSVE2 = true;
    HasFullFP16 = true;
    HasAlternativeNZCV = true;
    HasFRInt3264 = true;
    HasSSBS = true;
    HasSB = true;
    HasPredRes = true;
    HasBTI = true;
    HasDotProd = true;
    HasDIT = true;
    HasFlagM = true;
    HasRCPC = true;
    FPU |= NeonMode;
    HasCCPP = true;
    HasCRC = true;
    HasLSE = true;
    HasRDM = true;
  }
}
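
// Illustrative note (not in the upstream file): the minor-version checks
// above fall through cumulatively, so a single architecture choice enables
// every feature of the lower minor versions too. A v8.4 ArchInfo, for
// example, passes the >= 4u, >= 3u, >= 2u and >= 1u tests and ends up with
// dot product, DIT, FlagM, RCPC, NEON, DC CVAP (HasCCPP), CRC, LSE and RDM
// all enabled, exactly as if each step had been applied in turn.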

AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
                                     const TargetOptions &Opts)
    : TargetInfo(Triple), ABI("aapcs") {
  if (getTriple().isOSOpenBSD()) {
    Int64Type = SignedLongLong;
    IntMaxType = SignedLongLong;
  } else {
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
      WCharType = UnsignedInt;

    Int64Type = SignedLong;
    IntMaxType = SignedLong;
  }

  // All AArch64 implementations support ARMv8 FP, which makes half a legal
  // type.
  HasLegalHalfType = true;
  HalfArgsAndReturns = true;
  HasFloat16 = true;
  HasStrictFP = true;

  if (Triple.isArch64Bit())
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
  else
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;

  BitIntMaxAlign = 128;
  MaxVectorAlign = 128;
  MaxAtomicInlineWidth = 128;
  MaxAtomicPromoteWidth = 128;

  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
  LongDoubleFormat = &llvm::APFloat::IEEEquad();

  BFloat16Width = BFloat16Align = 16;
  BFloat16Format = &llvm::APFloat::BFloat();

  // Make __builtin_ms_va_list available.
  HasBuiltinMSVaList = true;

  // Make the SVE types available. Note that this deliberately doesn't
  // depend on SveMode, since in principle it should be possible to turn
  // SVE on and off within a translation unit. It should also be possible
  // to compile the global declaration:
  //
  // __SVInt8_t *ptr;
  //
  // even without SVE.
  HasAArch64SVETypes = true;

  // {} in inline assembly are neon specifiers, not assembly variant
  // specifiers.
  NoAsmVariants = true;

  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
  // contributes to the alignment of the containing aggregate in the same way
  // a plain (non bit-field) member of that type would, without exception for
  // zero-sized or anonymous bit-fields."
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
  UseZeroLengthBitfieldAlignment = true;

  HasUnalignedAccess = true;

  // AArch64 targets default to using the ARM C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);

  if (Triple.getOS() == llvm::Triple::Linux)
    this->MCountName = "\01_mcount";
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
    this->MCountName =
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}

StringRef AArch64TargetInfo::getABI() const { return ABI; }

bool AArch64TargetInfo::setABI(const std::string &Name) {
  if (Name != "aapcs" && Name != "aapcs-soft" && Name != "darwinpcs" &&
      Name != "pauthtest")
    return false;

  ABI = Name;
  return true;
}

bool AArch64TargetInfo::validateTarget(DiagnosticsEngine &Diags) const {
  if (hasFeature("fp") && ABI == "aapcs-soft") {
    // aapcs-soft is not allowed for targets with an FPU, to avoid there being
    // two incompatible ABIs.
    Diags.Report(diag::err_target_unsupported_abi_with_fpu) << ABI;
    return false;
  }
  if (getTriple().getEnvironment() == llvm::Triple::PAuthTest &&
      getTriple().getOS() != llvm::Triple::Linux) {
    Diags.Report(diag::err_target_unsupported_abi_for_triple)
        << getTriple().getEnvironmentName() << getTriple().getTriple();
    return false;
  }
  return true;
}

bool AArch64TargetInfo::validateGlobalRegisterVariable(
    StringRef RegName, unsigned RegSize, bool &HasSizeMismatch) const {
  if ((RegName == "sp") || RegName.starts_with("x")) {
    HasSizeMismatch = RegSize != 64;
    return true;
  } else if (RegName.starts_with("w")) {
    HasSizeMismatch = RegSize != 32;
    return true;
  }
  return false;
}
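
// Illustrative use (not part of the upstream file): a global register
// variable such as
//
//   register unsigned long StackPtr __asm__("sp");
//
// is accepted by the check above, while binding a 32-bit variable to "sp"
// would set HasSizeMismatch and be diagnosed by the caller.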

bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
                                                 BranchProtectionInfo &BPI,
                                                 StringRef &Err) const {
  llvm::ARM::ParsedBranchProtection PBP;
  if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err, HasPAuthLR))
    return false;

  BPI.SignReturnAddr =
      llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
          .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
          .Case("all", LangOptions::SignReturnAddressScopeKind::All)
          .Default(LangOptions::SignReturnAddressScopeKind::None);

  if (PBP.Key == "a_key")
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
  else
    BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;

  BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
  BPI.BranchProtectionPAuthLR = PBP.BranchProtectionPAuthLR;
  BPI.GuardedControlStack = PBP.GuardedControlStack;
  return true;
}
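
// Illustrative example (an assumption, not upstream text): a driver option
// such as
//
//   -mbranch-protection=pac-ret+leaf+b-key+bti
//
// parses to Scope == "all", Key == "b_key" and BranchTargetEnforcement set,
// so BPI records B-key return-address signing for all functions plus BTI.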

bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
  return llvm::AArch64::parseCpu(Name).has_value();
}

bool AArch64TargetInfo::setCPU(const std::string &Name) {
  return isValidCPUName(Name);
}

void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  llvm::AArch64::fillValidCPUArchList(Values);
}

void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
}

void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the ARMv8.1 defines
  getTargetDefinesARMV81A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
  Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
  // Also include the Armv8.2 defines
  getTargetDefinesARMV82A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.3 defines
  getTargetDefinesARMV83A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
  // Also include the Armv8.4 defines
  getTargetDefinesARMV84A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.5 defines
  // FIXME: Armv8.6 makes the following extensions mandatory:
  // - __ARM_FEATURE_BF16
  // - __ARM_FEATURE_MATMUL_INT8
  // Handle them here.
  getTargetDefinesARMV85A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.6 defines
  getTargetDefinesARMV86A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.7 defines
  getTargetDefinesARMV87A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV89A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.8 defines
  getTargetDefinesARMV88A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
                                               MacroBuilder &Builder) const {
  // Armv9-A maps to Armv8.5-A
  getTargetDefinesARMV85A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.1-A maps to Armv8.6-A
  getTargetDefinesARMV86A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.2-A maps to Armv8.7-A
  getTargetDefinesARMV87A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.3-A maps to Armv8.8-A
  getTargetDefinesARMV88A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV94A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.4-A maps to Armv8.9-A
  getTargetDefinesARMV89A(Opts, Builder);
}

void AArch64TargetInfo::getTargetDefinesARMV95A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.5-A does not have a v8.* equivalent, but is a superset of v9.4-A.
  getTargetDefinesARMV94A(Opts, Builder);
}
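
// Illustrative note (not in the upstream file): each Armv9.x entry point
// simply reuses the Armv8.x chain above. Selecting Armv9.2-A, for instance,
// calls getTargetDefinesARMV87A(), which walks down through v8.6,
// v8.5 (adding __ARM_FEATURE_FRINT), v8.4, v8.3 (adding __ARM_FEATURE_COMPLEX
// and __ARM_FEATURE_JCVT), v8.2 and finally v8.1 (adding
// __ARM_FEATURE_QRDMX).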

void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                         MacroBuilder &Builder) const {
  // Target identification.
  if (getTriple().isWindowsArm64EC()) {
    // Define the same set of macros as would be defined on x86_64 to ensure
    // that ARM64EC datatype layouts match those of x86_64 compiled code.
    Builder.defineMacro("__amd64__");
    Builder.defineMacro("__amd64");
    Builder.defineMacro("__x86_64");
    Builder.defineMacro("__x86_64__");
    Builder.defineMacro("__arm64ec__");
  } else {
    Builder.defineMacro("__aarch64__");
  }

  // Inline assembly supports AArch64 flag outputs.
  Builder.defineMacro("__GCC_ASM_FLAG_OUTPUTS__");

  std::string CodeModel = getTargetOpts().CodeModel;
  if (CodeModel == "default")
    CodeModel = "small";
  for (char &c : CodeModel)
    c = toupper(c);
  Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");

  // ACLE predefines. Many can only have one possible value on v8 AArch64.
  Builder.defineMacro("__ARM_ACLE", "200");
  Builder.defineMacro("__ARM_ARCH",
                      std::to_string(ArchInfo->Version.getMajor()));
  Builder.defineMacro("__ARM_ARCH_PROFILE",
                      std::string("'") + (char)ArchInfo->Profile + "'");

  Builder.defineMacro("__ARM_64BIT_STATE", "1");
  Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
  Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");

  Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
  Builder.defineMacro("__ARM_FEATURE_FMA", "1");
  Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
  Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
  Builder.defineMacro("__ARM_FEATURE_DIV");       // For backwards compatibility
  Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
  Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");

  Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");

  // These macros are set when Clang can parse declarations with these
  // attributes.
  Builder.defineMacro("__ARM_STATE_ZA", "1");
  Builder.defineMacro("__ARM_STATE_ZT0", "1");

  // 0xe implies support for half, single and double precision operations.
  if (FPU & FPUMode)
    Builder.defineMacro("__ARM_FP", "0xE");

  // PCS specifies this for SysV variants, which is all we support. Other ABIs
  // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
  Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
  Builder.defineMacro("__ARM_FP16_ARGS", "1");

  if (Opts.UnsafeFPMath)
    Builder.defineMacro("__ARM_FP_FAST", "1");

  Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
                      Twine(Opts.WCharSize ? Opts.WCharSize : 4));

  Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");

  if (FPU & NeonMode) {
    Builder.defineMacro("__ARM_NEON", "1");
    // 64-bit NEON supports half, single and double precision operations.
    Builder.defineMacro("__ARM_NEON_FP", "0xE");
  }

  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE", "1");

  if ((FPU & NeonMode) && (FPU & SveMode))
    Builder.defineMacro("__ARM_NEON_SVE_BRIDGE", "1");

  if (HasSVE2)
    Builder.defineMacro("__ARM_FEATURE_SVE2", "1");

  if (HasSVE2p1)
    Builder.defineMacro("__ARM_FEATURE_SVE2p1", "1");

  if (HasSVE2 && HasSVE2AES)
    Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");

  if (HasSVE2 && HasSVE2BitPerm)
    Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");

  if (HasSVE2 && HasSVE2SHA3)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");

  if (HasSVE2 && HasSVE2SM4)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");

  if (HasSVEB16B16)
    Builder.defineMacro("__ARM_FEATURE_SVE_B16B16", "1");

  if (HasSME) {
    Builder.defineMacro("__ARM_FEATURE_SME");
    Builder.defineMacro("__ARM_FEATURE_LOCALLY_STREAMING", "1");
  }

  if (HasSME2)
    Builder.defineMacro("__ARM_FEATURE_SME2", "1");

  if (HasSME2p1)
    Builder.defineMacro("__ARM_FEATURE_SME2p1", "1");

  if (HasSMEF16F16)
    Builder.defineMacro("__ARM_FEATURE_SME_F16F16", "1");

  if (HasSMEB16B16)
    Builder.defineMacro("__ARM_FEATURE_SME_B16B16", "1");

  if (HasCRC)
    Builder.defineMacro("__ARM_FEATURE_CRC32", "1");

  if (HasRCPC3)
    Builder.defineMacro("__ARM_FEATURE_RCPC", "3");
  else if (HasRCPC)
    Builder.defineMacro("__ARM_FEATURE_RCPC", "1");

  if (HasFMV)
    Builder.defineMacro("__HAVE_FUNCTION_MULTI_VERSIONING", "1");

  // The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
  // macros for AES, SHA2, SHA3 and SM4
  if (HasAES && HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");

  if (HasAES)
    Builder.defineMacro("__ARM_FEATURE_AES", "1");

  if (HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_SHA2", "1");

  if (HasSHA3) {
    Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
    Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
  }

  if (HasSM4) {
    Builder.defineMacro("__ARM_FEATURE_SM3", "1");
    Builder.defineMacro("__ARM_FEATURE_SM4", "1");
  }

  if (HasPAuth)
    Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");

  if (HasPAuthLR)
    Builder.defineMacro("__ARM_FEATURE_PAUTH_LR", "1");

  if (HasBTI)
    Builder.defineMacro("__ARM_FEATURE_BTI", "1");

  if (HasUnalignedAccess)
    Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");

  if ((FPU & NeonMode) && HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
  if (HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");

  if (HasDotProd)
    Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");

  if (HasMTE)
    Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");

  if (HasTME)
    Builder.defineMacro("__ARM_FEATURE_TME", "1");

  if (HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");

  if (HasLSE)
    Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");

  if (HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_BF16", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
    Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
  }

  if ((FPU & SveMode) && HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
  }

  if ((FPU & SveMode) && HasMatmulFP64)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");

  if ((FPU & SveMode) && HasMatmulFP32)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");

  if ((FPU & SveMode) && HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");

  if ((FPU & NeonMode) && HasFP16FML)
    Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");

  if (Opts.hasSignReturnAddress()) {
    // Bitmask:
    // 0: Protection using the A key
    // 1: Protection using the B key
    // 2: Protection including leaf functions
    // 3: Protection using PC as a diversifier
    unsigned Value = 0;

    if (Opts.isSignReturnAddressWithAKey())
      Value |= (1 << 0);
    else
      Value |= (1 << 1);

    if (Opts.isSignReturnAddressScopeAll())
      Value |= (1 << 2);

    if (Opts.BranchProtectionPAuthLR)
      Value |= (1 << 3);

    Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
  }
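
  // Illustrative example (not in the upstream file): with
  // -mbranch-protection=pac-ret+leaf the A key is used and leaf functions
  // are included, so Value == (1 << 0) | (1 << 2) == 5 and the preprocessor
  // sees __ARM_FEATURE_PAC_DEFAULT == 5.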

  if (Opts.BranchTargetEnforcement)
    Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");

  if (Opts.GuardedControlStack)
    Builder.defineMacro("__ARM_FEATURE_GCS_DEFAULT", "1");

  if (HasLS64)
    Builder.defineMacro("__ARM_FEATURE_LS64", "1");

  if (HasRandGen)
    Builder.defineMacro("__ARM_FEATURE_RNG", "1");

  if (HasMOPS)
    Builder.defineMacro("__ARM_FEATURE_MOPS", "1");

  if (HasD128)
    Builder.defineMacro("__ARM_FEATURE_SYSREG128", "1");

  if (HasGCS)
    Builder.defineMacro("__ARM_FEATURE_GCS", "1");

  if (*ArchInfo == llvm::AArch64::ARMV8_1A)
    getTargetDefinesARMV81A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_2A)
    getTargetDefinesARMV82A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_3A)
    getTargetDefinesARMV83A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_4A)
    getTargetDefinesARMV84A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_5A)
    getTargetDefinesARMV85A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_6A)
    getTargetDefinesARMV86A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_7A)
    getTargetDefinesARMV87A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_8A)
    getTargetDefinesARMV88A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV8_9A)
    getTargetDefinesARMV89A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9A)
    getTargetDefinesARMV9A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_1A)
    getTargetDefinesARMV91A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_2A)
    getTargetDefinesARMV92A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_3A)
    getTargetDefinesARMV93A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_4A)
    getTargetDefinesARMV94A(Opts, Builder);
  else if (*ArchInfo == llvm::AArch64::ARMV9_5A)
    getTargetDefinesARMV95A(Opts, Builder);

  // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8|16) builtins work.
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");

  // Allow detection of fast FMA support.
  Builder.defineMacro("__FP_FAST_FMA", "1");
  Builder.defineMacro("__FP_FAST_FMAF", "1");

  // C/C++ operators work on both VLS and VLA SVE types
  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS", "2");

  if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
  }
}
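
// Illustrative example (not in the upstream file): compiling with
// -msve-vector-bits=512 pins VScaleMin == VScaleMax == 4, so the block above
// defines __ARM_FEATURE_SVE_BITS to 512 and fixed-length SVE types such as
//
//   typedef svint32_t vec512 __attribute__((arm_sve_vector_bits(512)));
//
// become available.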

ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
  return llvm::ArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
                                         Builtin::FirstTSBuiltin);
}

std::optional<std::pair<unsigned, unsigned>>
AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
  if (LangOpts.VScaleMin || LangOpts.VScaleMax)
    return std::pair<unsigned, unsigned>(
        LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);

  if (hasFeature("sve"))
    return std::pair<unsigned, unsigned>(1, 16);

  return std::nullopt;
}

unsigned AArch64TargetInfo::multiVersionSortPriority(StringRef Name) const {
  if (Name == "default")
    return 0;
  if (auto Ext = llvm::AArch64::parseFMVExtension(Name))
    return Ext->Priority;
  return 0;
}

unsigned AArch64TargetInfo::multiVersionFeatureCost() const {
  // Take the maximum priority as per feature cost, so more features win.
  constexpr unsigned MaxFMVPriority = 1000;
  return MaxFMVPriority;
}

bool AArch64TargetInfo::doesFeatureAffectCodeGen(StringRef Name) const {
  // FMV extensions which imply no backend features do not affect codegen.
  if (auto Ext = llvm::AArch64::parseFMVExtension(Name))
    return !Ext->Features.empty();
  return false;
}

bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
  // CPU features might be separated by '+', extract them and check
  llvm::SmallVector<StringRef, 8> Features;
  FeatureStr.split(Features, "+");
  for (auto &Feature : Features)
    if (!llvm::AArch64::parseFMVExtension(Feature.trim()).has_value())
      return false;
  return true;
}
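
// Illustrative use (an assumption, not upstream text): these hooks back the
// function-multi-versioning builtins, e.g.
//
//   if (__builtin_cpu_supports("sve2+bf16")) { /* ... */ }
//
// where validateCpuSupports() above checks each '+'-separated name against
// the known FMV extensions at compile time.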

bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
  return llvm::StringSwitch<bool>(Feature)
      .Cases("aarch64", "arm64", "arm", true)
      .Case("fmv", HasFMV)
      .Case("fp", FPU & FPUMode)
      .Cases("neon", "simd", FPU & NeonMode)
      .Case("jscvt", HasJSCVT)
      .Case("fcma", HasFCMA)
      .Case("rng", HasRandGen)
      .Case("flagm", HasFlagM)
      .Case("flagm2", HasAlternativeNZCV)
      .Case("fp16fml", HasFP16FML)
      .Case("dotprod", HasDotProd)
      .Case("sm4", HasSM4)
      .Case("rdm", HasRDM)
      .Case("lse", HasLSE)
      .Case("crc", HasCRC)
      .Case("sha2", HasSHA2)
      .Case("sha3", HasSHA3)
      .Cases("aes", "pmull", HasAES)
      .Cases("fp16", "fullfp16", HasFullFP16)
      .Case("dit", HasDIT)
      .Case("dpb", HasCCPP)
      .Case("dpb2", HasCCDP)
      .Case("rcpc", HasRCPC)
      .Case("frintts", HasFRInt3264)
      .Case("i8mm", HasMatMul)
      .Case("bf16", HasBFloat16)
      .Case("sve", FPU & SveMode)
      .Case("sve-bf16", FPU & SveMode && HasBFloat16)
      .Case("sve-i8mm", FPU & SveMode && HasMatMul)
      .Case("sve-b16b16", HasSVEB16B16)
      .Case("f32mm", FPU & SveMode && HasMatmulFP32)
      .Case("f64mm", FPU & SveMode && HasMatmulFP64)
      .Case("sve2", FPU & SveMode && HasSVE2)
      .Case("sve2-pmull128", FPU & SveMode && HasSVE2AES)
      .Case("sve2-bitperm", FPU & SveMode && HasSVE2BitPerm)
      .Case("sve2-sha3", FPU & SveMode && HasSVE2SHA3)
      .Case("sve2-sm4", FPU & SveMode && HasSVE2SM4)
      .Case("sve2p1", FPU & SveMode && HasSVE2p1)
      .Case("sme", HasSME)
      .Case("sme2", HasSME2)
      .Case("sme2p1", HasSME2p1)
      .Case("sme-f64f64", HasSMEF64F64)
      .Case("sme-i16i64", HasSMEI16I64)
      .Case("sme-fa64", HasSMEFA64)
      .Case("sme-f16f16", HasSMEF16F16)
      .Case("sme-b16b16", HasSMEB16B16)
      .Cases("memtag", "memtag2", HasMTE)
      .Case("sb", HasSB)
      .Case("predres", HasPredRes)
      .Cases("ssbs", "ssbs2", HasSSBS)
      .Case("bti", HasBTI)
      .Cases("ls64", "ls64_v", "ls64_accdata", HasLS64)
      .Case("wfxt", HasWFxT)
      .Case("rcpc3", HasRCPC3)
      .Default(false);
}

void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
                                          StringRef Name, bool Enabled) const {
  Features[Name] = Enabled;
  // If the feature is an architecture feature (like v8.2a), add all previous
  // architecture versions and any dependent target features.
  const std::optional<llvm::AArch64::ArchInfo> ArchInfo =
      llvm::AArch64::ArchInfo::findBySubArch(Name);

  if (!ArchInfo)
    return; // Not an architecture, nothing more to do.

  // Disabling an architecture feature does not affect dependent features
  if (!Enabled)
    return;

  for (const auto *OtherArch : llvm::AArch64::ArchInfos)
    if (ArchInfo->implies(*OtherArch))
      Features[OtherArch->getSubArch()] = true;

  // Set any features implied by the architecture
  std::vector<StringRef> CPUFeats;
  if (llvm::AArch64::getExtensionFeatures(ArchInfo->DefaultExts, CPUFeats)) {
    for (auto F : CPUFeats) {
      assert(F[0] == '+' && "Expected + in target feature!");
      Features[F.drop_front(1)] = true;
    }
  }
}
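
// Illustrative example (not in the upstream file): enabling "v8.2a" here
// also marks the implied "v8.1a" and "v8a" sub-arches as enabled, then adds
// the architecture's default extensions (for example fp-armv8 and neon), so
// the feature map ends up internally consistent.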

bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
                                             DiagnosticsEngine &Diags) {
  for (const auto &Feature : Features) {
    if (Feature == "-fp-armv8")
      HasNoFP = true;
    if (Feature == "-neon")
      HasNoNeon = true;
    if (Feature == "-sve")
      HasNoSVE = true;

    if (Feature == "+neon" || Feature == "+fp-armv8")
      FPU |= NeonMode;
    if (Feature == "+jscvt") {
      HasJSCVT = true;
      FPU |= NeonMode;
    }
    if (Feature == "+fcma") {
      HasFCMA = true;
      FPU |= NeonMode;
    }

    if (Feature == "+sve") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
    }
    if (Feature == "+sve2") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
    }
    if (Feature == "+sve2p1") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2p1 = true;
    }
    if (Feature == "+sve2-aes") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2AES = true;
    }
    if (Feature == "+sve2-sha3") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2SHA3 = true;
    }
    if (Feature == "+sve2-sm4") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2SM4 = true;
    }
    if (Feature == "+sve-b16b16")
      HasSVEB16B16 = true;
    if (Feature == "+sve2-bitperm") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasSVE2 = true;
      HasSVE2BitPerm = true;
    }
    if (Feature == "+f32mm") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasMatmulFP32 = true;
    }
    if (Feature == "+f64mm") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasFullFP16 = true;
      HasMatmulFP64 = true;
    }
    if (Feature == "+sme") {
      HasSME = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
    }
    if (Feature == "+sme2") {
      HasSME = true;
      HasSME2 = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
    }
    if (Feature == "+sme2p1") {
      HasSME = true;
      HasSME2 = true;
      HasSME2p1 = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
    }
    if (Feature == "+sme-f64f64") {
      HasSME = true;
      HasSMEF64F64 = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
    }
    if (Feature == "+sme-i16i64") {
      HasSME = true;
      HasSMEI16I64 = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
    }
    if (Feature == "+sme-fa64") {
      FPU |= NeonMode;
      FPU |= SveMode;
      HasSME = true;
      HasSVE2 = true;
      HasSMEFA64 = true;
    }
    if (Feature == "+sme-f16f16") {
      HasSME = true;
      HasSME2 = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
      HasSMEF16F16 = true;
    }
    if (Feature == "+sme-b16b16") {
      HasSME = true;
      HasSME2 = true;
      HasBFloat16 = true;
      HasFullFP16 = true;
      HasSVEB16B16 = true;
      HasSMEB16B16 = true;
    }
    if (Feature == "+sb")
      HasSB = true;
    if (Feature == "+predres")
      HasPredRes = true;
    if (Feature == "+ssbs")
      HasSSBS = true;
    if (Feature == "+bti")
      HasBTI = true;
    if (Feature == "+wfxt")
      HasWFxT = true;
    if (Feature == "-fmv")
      HasFMV = false;
    if (Feature == "+crc")
      HasCRC = true;
    if (Feature == "+rcpc")
      HasRCPC = true;
    if (Feature == "+aes") {
      FPU |= NeonMode;
      HasAES = true;
    }
    if (Feature == "+sha2") {
      FPU |= NeonMode;
      HasSHA2 = true;
    }
    if (Feature == "+sha3") {
      FPU |= NeonMode;
      HasSHA2 = true;
      HasSHA3 = true;
    }
    if (Feature == "+rdm") {
      FPU |= NeonMode;
      HasRDM = true;
    }
    if (Feature == "+dit")
      HasDIT = true;
    if (Feature == "+cccp")
      HasCCPP = true;
    if (Feature == "+ccdp") {
      HasCCPP = true;
      HasCCDP = true;
    }
    if (Feature == "+fptoint")
      HasFRInt3264 = true;
    if (Feature == "+sm4") {
      FPU |= NeonMode;
      HasSM4 = true;
    }
    if (Feature == "+strict-align")
      HasUnalignedAccess = false;

    // All predecessor archs are added but select the latest one for ArchKind.
    if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
      ArchInfo = &llvm::AArch64::ARMV8A;
    if (Feature == "+v8.1a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_1A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_1A;
    if (Feature == "+v8.2a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_2A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_2A;
    if (Feature == "+v8.3a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_3A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_3A;
    if (Feature == "+v8.4a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_4A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_4A;
    if (Feature == "+v8.5a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_5A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_5A;
    if (Feature == "+v8.6a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_6A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_6A;
    if (Feature == "+v8.7a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_7A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_7A;
    if (Feature == "+v8.8a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_8A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_8A;
    if (Feature == "+v8.9a" &&
        ArchInfo->Version < llvm::AArch64::ARMV8_9A.Version)
      ArchInfo = &llvm::AArch64::ARMV8_9A;
    if (Feature == "+v9a" && ArchInfo->Version < llvm::AArch64::ARMV9A.Version)
      ArchInfo = &llvm::AArch64::ARMV9A;
    if (Feature == "+v9.1a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_1A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_1A;
    if (Feature == "+v9.2a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_2A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_2A;
    if (Feature == "+v9.3a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_3A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_3A;
    if (Feature == "+v9.4a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_4A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_4A;
    if (Feature == "+v9.5a" &&
        ArchInfo->Version < llvm::AArch64::ARMV9_5A.Version)
      ArchInfo = &llvm::AArch64::ARMV9_5A;
    if (Feature == "+v8r")
      ArchInfo = &llvm::AArch64::ARMV8R;
    if (Feature == "+fullfp16") {
      FPU |= NeonMode;
      HasFullFP16 = true;
    }
    if (Feature == "+dotprod") {
      FPU |= NeonMode;
      HasDotProd = true;
    }
    if (Feature == "+fp16fml") {
      FPU |= NeonMode;
      HasFullFP16 = true;
      HasFP16FML = true;
    }
    if (Feature == "+mte")
      HasMTE = true;
    if (Feature == "+tme")
      HasTME = true;
    if (Feature == "+pauth")
      HasPAuth = true;
    if (Feature == "+i8mm")
      HasMatMul = true;
    if (Feature == "+bf16")
      HasBFloat16 = true;
    if (Feature == "+lse")
      HasLSE = true;
    if (Feature == "+ls64")
      HasLS64 = true;
    if (Feature == "+rand")
      HasRandGen = true;
    if (Feature == "+flagm")
      HasFlagM = true;
    if (Feature == "+altnzcv") {
      HasFlagM = true;
      HasAlternativeNZCV = true;
    }
    if (Feature == "+mops")
      HasMOPS = true;
    if (Feature == "+d128")
      HasD128 = true;
    if (Feature == "+gcs")
      HasGCS = true;
    if (Feature == "+rcpc3")
      HasRCPC3 = true;
    if (Feature == "+pauth-lr") {
      HasPAuthLR = true;
      HasPAuth = true;
    }
  }

  // Check features that are manually disabled by command line options.
  // This needs to be checked after architecture-related features are handled,
  // making sure they are properly disabled when required.
  for (const auto &Feature : Features) {
    if (Feature == "-d128")
      HasD128 = false;
  }

  setDataLayout();
  setArchFeatures();

  if (HasNoFP) {
    FPU &= ~FPUMode;
    FPU &= ~NeonMode;
    FPU &= ~SveMode;
  }
  if (HasNoNeon) {
    FPU &= ~NeonMode;
    FPU &= ~SveMode;
  }
  if (HasNoSVE)
    FPU &= ~SveMode;

  return true;
}
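
// Illustrative note (not in the upstream file): the "-fp-armv8", "-neon" and
// "-sve" markers are only recorded during the loop and applied after
// setArchFeatures(), so a feature list such as {"+sve2", "-neon"} first
// enables NEON/SVE via "+sve2" and then strips NeonMode and SveMode again at
// the end, keeping the negative options authoritative.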

// Parse AArch64 Target attributes, which are a comma separated list of:
//  "arch=<arch>" - parsed to features as per -march=..
//  "cpu=<cpu>" - parsed to features as per -mcpu=.., with CPU set to <cpu>
//  "tune=<cpu>" - TuneCPU set to <cpu>
//  "feature", "no-feature" - Add (or remove) feature.
//  "+feature", "+nofeature" - Add (or remove) feature.
//
// A feature may correspond to an Extension (anything with a corresponding
// AEK_), in which case an ExtensionSet is used to parse it and expand its
// dependencies. If the feature does not yield a successful parse then it
// is passed through.
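//
// Illustrative example (not in the upstream file): an attribute such as
//
//   __attribute__((target("arch=armv8.2-a+sve,tune=cortex-a710")))
//   void kernel(void);
//
// is split on commas; "arch=" adds the Armv8.2-A defaults plus the sve
// extension to the feature set, and "tune=" only records the TuneCPU name.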
1129
ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
1130
ParsedTargetAttr Ret;
1131
if (Features == "default")
1132
return Ret;
1133
SmallVector<StringRef, 1> AttrFeatures;
1134
Features.split(AttrFeatures, ",");
1135
bool FoundArch = false;
1136
1137
auto SplitAndAddFeatures = [](StringRef FeatString,
1138
std::vector<std::string> &Features,
1139
llvm::AArch64::ExtensionSet &FeatureBits) {
1140
SmallVector<StringRef, 8> SplitFeatures;
1141
FeatString.split(SplitFeatures, StringRef("+"), -1, false);
1142
for (StringRef Feature : SplitFeatures) {
1143
if (FeatureBits.parseModifier(Feature))
1144
continue;
1145
// Pass through anything that failed to parse so that we can emit
1146
// diagnostics, as well as valid internal feature names.
1147
//
1148
// FIXME: We should consider rejecting internal feature names like
1149
// neon, v8a, etc.
1150
// FIXME: We should consider emitting diagnostics here.
1151
if (Feature.starts_with("no"))
1152
Features.push_back("-" + Feature.drop_front(2).str());
1153
else
1154
Features.push_back("+" + Feature.str());
1155
}
1156
};
1157
1158
llvm::AArch64::ExtensionSet FeatureBits;
1159
// Reconstruct the bitset from the command line option features.
1160
FeatureBits.reconstructFromParsedFeatures(getTargetOpts().FeaturesAsWritten,
1161
Ret.Features);
1162
1163
for (auto &Feature : AttrFeatures) {
1164
Feature = Feature.trim();
1165
if (Feature.starts_with("fpmath="))
1166
continue;
1167
1168
if (Feature.starts_with("branch-protection=")) {
1169
Ret.BranchProtection = Feature.split('=').second.trim();
1170
continue;
1171
}
1172
1173
if (Feature.starts_with("arch=")) {
1174
if (FoundArch)
1175
Ret.Duplicate = "arch=";
1176
FoundArch = true;
1177
std::pair<StringRef, StringRef> Split =
1178
Feature.split("=").second.trim().split("+");
1179
const llvm::AArch64::ArchInfo *AI = llvm::AArch64::parseArch(Split.first);
1180
1181
// Parse the architecture version, adding the required features to
1182
// Ret.Features.
1183
if (!AI)
1184
continue;
1185
FeatureBits.addArchDefaults(*AI);
1186
// Add any extra features, after the +
1187
SplitAndAddFeatures(Split.second, Ret.Features, FeatureBits);
1188
} else if (Feature.starts_with("cpu=")) {
1189
if (!Ret.CPU.empty())
1190
Ret.Duplicate = "cpu=";
1191
else {
1192
// Split the cpu string into "cpu=", "cortex-a710" and any remaining
1193
// "+feat" features.
1194
std::pair<StringRef, StringRef> Split =
1195
Feature.split("=").second.trim().split("+");
1196
Ret.CPU = Split.first;
1197
if (auto CpuInfo = llvm::AArch64::parseCpu(Ret.CPU)) {
1198
FeatureBits.addCPUDefaults(*CpuInfo);
1199
SplitAndAddFeatures(Split.second, Ret.Features, FeatureBits);
1200
}
1201
}
1202
} else if (Feature.starts_with("tune=")) {
1203
if (!Ret.Tune.empty())
1204
Ret.Duplicate = "tune=";
1205
else
1206
Ret.Tune = Feature.split("=").second.trim();
1207
} else if (Feature.starts_with("+")) {
1208
SplitAndAddFeatures(Feature, Ret.Features, FeatureBits);
1209
} else {
1210
if (FeatureBits.parseModifier(Feature, /* AllowNoDashForm = */ true))
1211
continue;
1212
// Pass through anything that failed to parse so that we can emit
1213
// diagnostics, as well as valid internal feature names.
1214
//
1215
// FIXME: We should consider rejecting internal feature names like
1216
// neon, v8a, etc.
1217
// FIXME: We should consider emitting diagnostics here.
1218
if (Feature.starts_with("no-"))
1219
Ret.Features.push_back("-" + Feature.drop_front(3).str());
1220
else
1221
Ret.Features.push_back("+" + Feature.str());
1222
}
1223
}
1224
FeatureBits.toLLVMFeatureList(Ret.Features);
1225
return Ret;
1226
}

bool AArch64TargetInfo::hasBFloat16Type() const {
  return true;
}

TargetInfo::CallingConvCheckResult
AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_C:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_PreserveNone:
  case CC_OpenCLKernel:
  case CC_AArch64VectorCall:
  case CC_AArch64SVEPCS:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}

bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }

TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::AArch64ABIBuiltinVaList;
}

const char *const AArch64TargetInfo::GCCRegNames[] = {
    // clang-format off

    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10",
    "p11", "p12", "p13", "p14", "p15",

    // SVE predicate-as-counter registers
    "pn0", "pn1", "pn2", "pn3", "pn4", "pn5", "pn6", "pn7", "pn8",
    "pn9", "pn10", "pn11", "pn12", "pn13", "pn14", "pn15",

    // SME registers
    "za", "zt0",

    // clang-format on
};

ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
  return llvm::ArrayRef(GCCRegNames);
}

const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {{"w31"}, "wsp"},
    {{"x31"}, "sp"},
    // GCC rN registers are aliases of xN registers.
    {{"r0"}, "x0"},
    {{"r1"}, "x1"},
    {{"r2"}, "x2"},
    {{"r3"}, "x3"},
    {{"r4"}, "x4"},
    {{"r5"}, "x5"},
    {{"r6"}, "x6"},
    {{"r7"}, "x7"},
    {{"r8"}, "x8"},
    {{"r9"}, "x9"},
    {{"r10"}, "x10"},
    {{"r11"}, "x11"},
    {{"r12"}, "x12"},
    {{"r13"}, "x13"},
    {{"r14"}, "x14"},
    {{"r15"}, "x15"},
    {{"r16"}, "x16"},
    {{"r17"}, "x17"},
    {{"r18"}, "x18"},
    {{"r19"}, "x19"},
    {{"r20"}, "x20"},
    {{"r21"}, "x21"},
    {{"r22"}, "x22"},
    {{"r23"}, "x23"},
    {{"r24"}, "x24"},
    {{"r25"}, "x25"},
    {{"r26"}, "x26"},
    {{"r27"}, "x27"},
    {{"r28"}, "x28"},
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};

ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
  return llvm::ArrayRef(GCCRegAliases);
}

// Returns the length of cc constraint.
static unsigned matchAsmCCConstraint(const char *Name) {
  constexpr unsigned len = 5;
  auto RV = llvm::StringSwitch<unsigned>(Name)
                .Case("@cceq", len)
                .Case("@ccne", len)
                .Case("@cchs", len)
                .Case("@cccs", len)
                .Case("@cccc", len)
                .Case("@cclo", len)
                .Case("@ccmi", len)
                .Case("@ccpl", len)
                .Case("@ccvs", len)
                .Case("@ccvc", len)
                .Case("@cchi", len)
                .Case("@ccls", len)
                .Case("@ccge", len)
                .Case("@cclt", len)
                .Case("@ccgt", len)
                .Case("@ccle", len)
                .Default(0);
  return RV;
}
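
// Illustrative use (not part of the upstream file): with AArch64 flag
// outputs a caller can write, e.g.
//
//   int eq;
//   __asm__("cmp %w1, %w2" : "=@cceq"(eq) : "r"(a), "r"(b));
//
// and matchAsmCCConstraint() recognises the five-character "@cceq" token;
// the variables a, b and eq here are hypothetical.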

std::string
AArch64TargetInfo::convertConstraint(const char *&Constraint) const {
  std::string R;
  switch (*Constraint) {
  case 'U': // Three-character constraint; add "@3" hint for later parsing.
    R = std::string("@3") + std::string(Constraint, 3);
    Constraint += 2;
    break;
  case '@':
    if (const unsigned Len = matchAsmCCConstraint(Constraint)) {
      std::string Converted = "{" + std::string(Constraint, Len) + "}";
      Constraint += Len - 1;
      return Converted;
    }
    return std::string(1, *Constraint);
  default:
    R = TargetInfo::convertConstraint(Constraint);
    break;
  }
  return R;
}

bool AArch64TargetInfo::validateAsmConstraint(
    const char *&Name, TargetInfo::ConstraintInfo &Info) const {
  switch (*Name) {
  default:
    return false;
  case 'w': // Floating point and SIMD registers (V0-V31)
    Info.setAllowsRegister();
    return true;
  case 'I': // Constant that can be used with an ADD instruction
  case 'J': // Constant that can be used with a SUB instruction
  case 'K': // Constant that can be used with a 32-bit logical instruction
  case 'L': // Constant that can be used with a 64-bit logical instruction
  case 'M': // Constant that can be used as a 32-bit MOV immediate
  case 'N': // Constant that can be used as a 64-bit MOV immediate
  case 'Y': // Floating point constant zero
  case 'Z': // Integer constant zero
    return true;
  case 'Q': // A memory reference with base register and no offset
    Info.setAllowsMemory();
    return true;
  case 'S': // A symbolic address
    Info.setAllowsRegister();
    return true;
  case 'U':
    if (Name[1] == 'p' &&
        (Name[2] == 'l' || Name[2] == 'a' || Name[2] == 'h')) {
      // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7, "Uph"=P8-P15)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    if (Name[1] == 'c' && (Name[2] == 'i' || Name[2] == 'j')) {
      // Gpr registers ("Uci"=w8-11, "Ucj"=w12-15)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
    // Utf: A memory address suitable for ldp/stp in TF mode.
    // Usa: An absolute symbolic address.
    // Ush: The high part (bits 32:12) of a pc-relative symbolic address.

    // Better to return an error saying that it's an unrecognised constraint
    // even if this is a valid constraint in gcc.
    return false;
  case 'z': // Zero register, wzr or xzr
    Info.setAllowsRegister();
    return true;
  case 'x': // Floating point and SIMD registers (V0-V15)
    Info.setAllowsRegister();
    return true;
  case 'y': // SVE registers (V0-V7)
    Info.setAllowsRegister();
    return true;
  case '@':
    // CC condition
    if (const unsigned Len = matchAsmCCConstraint(Name)) {
      Name += Len - 1;
      Info.setAllowsRegister();
      return true;
    }
  }
  return false;
}
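
// Illustrative use (an assumption, not upstream text): the SVE predicate
// constraints accepted above allow inline assembly such as
//
//   __SVBool_t pg;
//   __asm__("ptrue %0.b" : "=Upa"(pg));
//
// where "Upa" may name any of p0-p15 and "Upl" would restrict the choice to
// p0-p7.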

bool AArch64TargetInfo::validateConstraintModifier(
    StringRef Constraint, char Modifier, unsigned Size,
    std::string &SuggestedModifier) const {
  // Strip off constraint modifiers.
  Constraint = Constraint.ltrim("=+&");

  switch (Constraint[0]) {
  default:
    return true;
  case 'z':
  case 'r': {
    switch (Modifier) {
    case 'x':
    case 'w':
      // For now assume that the person knows what they're
      // doing with the modifier.
      return true;
    default:
      // By default an 'r' constraint will be in the 'x'
      // registers.
      if (Size == 64)
        return true;

      if (Size == 512)
        return HasLS64;

      SuggestedModifier = "w";
      return false;
    }
  }
  }
}

std::string_view AArch64TargetInfo::getClobbers() const { return ""; }

int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
  if (RegNo == 0)
    return 0;
  if (RegNo == 1)
    return 1;
  return -1;
}

bool AArch64TargetInfo::validatePointerAuthKey(
    const llvm::APSInt &value) const {
  return 0 <= value && value <= 3;
}

bool AArch64TargetInfo::hasInt128Type() const { return true; }

AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}

void AArch64leTargetInfo::setDataLayout() {
  if (getTriple().isOSBinFormatMachO()) {
    if (getTriple().isArch32Bit())
      resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128-Fn32", "_");
    else
      resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128-Fn32", "_");
  } else
    resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32");
}

void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}

AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}

void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64EB__");
  Builder.defineMacro("__AARCH_BIG_ENDIAN");
  Builder.defineMacro("__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}

void AArch64beTargetInfo::setDataLayout() {
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32");
}

WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
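
// Illustrative note (not in the upstream file): under this LLP64 model
// sizeof(long) == 4 while pointers stay 8 bytes wide, so Windows on AArch64
// satisfies
//
//   static_assert(sizeof(long) == 4 && sizeof(void *) == 8, "LLP64");
//
// in contrast to the LP64 layout used by the Linux and BSD targets above.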

void WindowsARM64TargetInfo::setDataLayout() {
  resetDataLayout(Triple.isOSBinFormatMachO()
                      ? "e-m:o-i64:64-i128:128-n32:64-S128-Fn32"
                      : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128-Fn32",
                  Triple.isOSBinFormatMachO() ? "_" : "");
}

TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}

TargetInfo::CallingConvCheckResult
WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_X86VectorCall:
    if (getTriple().isWindowsArm64EC())
      return CCCR_OK;
    return CCCR_Ignore;
  case CC_X86StdCall:
  case CC_X86ThisCall:
  case CC_X86FastCall:
    return CCCR_Ignore;
  case CC_C:
  case CC_OpenCLKernel:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_PreserveNone:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}

MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}

void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  if (getTriple().isWindowsArm64EC()) {
    Builder.defineMacro("_M_X64", "100");
    Builder.defineMacro("_M_AMD64", "100");
    Builder.defineMacro("_M_ARM64EC", "1");
  } else {
    Builder.defineMacro("_M_ARM64", "1");
  }
}

TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  return CCK_MicrosoftWin64;
}

unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize,
                                                     bool HasNonWeakDef) const {
  unsigned Align =
      WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize, HasNonWeakDef);

  // MSVC does size based alignment for arm64 based on alignment section in
  // below document, replicate that to keep alignment consistent with object
  // files compiled by MSVC.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
  if (TypeSize >= 512) {           // TypeSize >= 64 bytes
    Align = std::max(Align, 128u); // align type at least 16 bytes
  } else if (TypeSize >= 64) {     // TypeSize >= 8 bytes
    Align = std::max(Align, 64u);  // align type at least 8 bytes
  } else if (TypeSize >= 16) {     // TypeSize >= 2 bytes
    Align = std::max(Align, 32u);  // align type at least 4 bytes
  }
  return Align;
}
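
// Illustrative example (not in the upstream file): a 64-byte global such as
//
//   char Buffer[64];
//
// has TypeSize == 512 bits, so the rule above raises its minimum alignment
// to 128 bits (16 bytes), matching MSVC-generated object files.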

MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}

DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}

void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
                                           const llvm::Triple &Triple,
                                           MacroBuilder &Builder) const {
  Builder.defineMacro("__AARCH64_SIMD__");
  if (Triple.isArch32Bit())
    Builder.defineMacro("__ARM64_ARCH_8_32__");
  else
    Builder.defineMacro("__ARM64_ARCH_8__");
  Builder.defineMacro("__ARM_NEON__");
  Builder.defineMacro("__REGISTER_PREFIX__", "");
  Builder.defineMacro("__arm64", "1");
  Builder.defineMacro("__arm64__", "1");

  if (Triple.isArm64e())
    Builder.defineMacro("__arm64e__", "1");

  getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
}

TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::CharPtrBuiltinVaList;
}

// 64-bit RenderScript is aarch64
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  IsRenderScriptTarget = true;
}

void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__RENDERSCRIPT__");
  AArch64leTargetInfo::getTargetDefines(Opts, Builder);
}