/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014 Linaro Ltd. <[email protected]>
 */

#ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H

#include <asm/alternative-macros.h>
#include <asm/cpucaps.h>
#include <asm/cputype.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>

#define MAX_CPU_FEATURES	192
#define cpu_feature(x)		KERNEL_HWCAP_ ## x

#define ARM64_SW_FEATURE_OVERRIDE_NOKASLR	0
#define ARM64_SW_FEATURE_OVERRIDE_HVHE		4
#define ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF	8

#ifndef __ASSEMBLY__

#include <linux/bug.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>

/*
 * CPU feature register tracking
 *
 * The safe value of a CPUID feature field is dependent on the implications
 * of the values assigned to it by the architecture. Based on the relationship
 * between the values, the features are classified into 3 types - LOWER_SAFE,
 * HIGHER_SAFE and EXACT.
 *
 * The lowest value of all the CPUs is chosen for LOWER_SAFE and highest
 * for HIGHER_SAFE. It is expected that all CPUs have the same value for
 * a field when EXACT is specified, failing which, the safe value specified
 * in the table is chosen.
 */

enum ftr_type {
	FTR_EXACT,			/* Use a predefined safe value */
	FTR_LOWER_SAFE,			/* Smaller value is safe */
	FTR_HIGHER_SAFE,		/* Bigger value is safe */
	FTR_HIGHER_OR_ZERO_SAFE,	/* Bigger value is safe, but 0 is biggest */
};
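
/*
 * Illustrative example (editorial addition, not part of the original header):
 * for a FTR_LOWER_SAFE field where CPU0 reports 2 and CPU1 reports 1, the
 * sanitised system-wide value becomes 1. For a FTR_EXACT field, any mismatch
 * between CPUs makes the sanitised value fall back to the field's safe_val.
 */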

#define FTR_STRICT	true	/* SANITY check strict matching required */
#define FTR_NONSTRICT	false	/* SANITY check ignored */

#define FTR_SIGNED	true	/* Value should be treated as signed */
#define FTR_UNSIGNED	false	/* Value should be treated as unsigned */

#define FTR_VISIBLE	true	/* Feature visible to the user space */
#define FTR_HIDDEN	false	/* Feature is hidden from the user */

#define FTR_VISIBLE_IF_IS_ENABLED(config)		\
	(IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)

struct arm64_ftr_bits {
	bool		sign;	/* Value is signed? */
	bool		visible;
	bool		strict;	/* CPU sanity check: strict matching required? */
	enum ftr_type	type;
	u8		shift;
	u8		width;
	s64		safe_val; /* safe value for FTR_EXACT features */
};

/*
 * Describe the early feature override to the core override code:
 *
 * @val		Values that are to be merged into the final
 *		sanitised value of the register. Only the bitfields
 *		set to 1 in @mask are valid.
 * @mask	Mask of the features that are overridden by @val.
 *
 * A @mask field set to full-1 indicates that the corresponding field
 * in @val is a valid override.
 *
 * A @mask field set to full-0 with the corresponding @val field set
 * to full-0 denotes that this field has no override.
 *
 * A @mask field set to full-0 with the corresponding @val field set
 * to full-1 denotes that this field has an invalid override.
 */
struct arm64_ftr_override {
	u64		val;
	u64		mask;
};
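
/*
 * Worked example (editorial addition, not part of the original header):
 * to force a 4-bit field at bit 28 to the value 1, the early override code
 * would set mask = GENMASK_ULL(31, 28) and val = (u64)1 << 28 for that field.
 * A field with mask == 0 but val != 0 is the "invalid override" case
 * described above and is ignored when the override is applied.
 */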

/*
 * @arm64_ftr_reg - Feature register
 * @strict_mask		Bits which should match across all CPUs for sanity.
 * @sys_val		Safe value across the CPUs (system view)
 */
struct arm64_ftr_reg {
	const char			*name;
	u64				strict_mask;
	u64				user_mask;
	u64				sys_val;
	u64				user_val;
	struct arm64_ftr_override	*override;
	const struct arm64_ftr_bits	*ftr_bits;
};

extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;

/*
 * CPU capabilities:
 *
 * We use arm64_cpu_capabilities to represent system features, errata
 * workarounds (both used internally by the kernel and tracked in
 * system_cpucaps) and ELF HWCAPs (which are exposed to user space).
 *
 * To support systems with heterogeneous CPUs, we need to make sure that we
 * detect the capabilities correctly on the system and take appropriate
 * measures to ensure there are no incompatibilities.
 *
 * This comment tries to explain how we treat the capabilities.
 * Each capability has the following list of attributes:
 *
 * 1) Scope of Detection: The system detects a given capability by
 *    performing some checks at runtime. This could be, e.g., checking the
 *    value of a field in a CPU ID feature register or checking the CPU
 *    model. The capability provides a callback (@matches()) to perform
 *    the check. Scope defines how the checks should be performed.
 *    There are three cases:
 *
 *    a) SCOPE_LOCAL_CPU: check all the CPUs and "detect" if at least one
 *       matches. This implies we have to run the check on all the booting
 *       CPUs, until the system decides that the state of the capability
 *       is finalised. (See section 2 below.)
 *       Or
 *    b) SCOPE_SYSTEM: check all the CPUs and "detect" if all the CPUs
 *       match. This implies we run the check only once, when the system
 *       decides to finalise the state of the capability. If the
 *       capability relies on a field in one of the CPU ID feature
 *       registers, we use the sanitised value of the register from the
 *       CPU feature infrastructure to make the decision.
 *       Or
 *    c) SCOPE_BOOT_CPU: check only on the primary boot CPU to detect the
 *       feature. This category is for features that are "finalised"
 *       (or used) by the kernel very early, even before the SMP CPUs
 *       are brought up.
 *
 *    The process of detection is usually denoted by "updating" the
 *    capability state in the code.
 *
 * 2) Finalise the state: The kernel should finalise the state of a
 *    capability at some point during its execution and take necessary
 *    actions if any. Usually, this is done after all the boot-time
 *    enabled CPUs are brought up by the kernel, so that it can make a
 *    better decision based on the available set of CPUs. However, there
 *    are some special cases where the action is taken during early boot
 *    by the primary boot CPU (e.g., running the kernel at EL2 with
 *    Virtualisation Host Extensions). The kernel usually disallows any
 *    changes to the state of a capability once it finalises the
 *    capability and takes any action, as it may be impossible to execute
 *    the actions safely. A CPU brought up after a capability is
 *    "finalised" is referred to as a "late CPU" w.r.t. the capability,
 *    e.g., all secondary CPUs are treated as "late CPUs" for capabilities
 *    determined by the boot CPU.
 *
 *    At the moment there are two passes of finalising the capabilities:
 *    a) Boot CPU scope capabilities - finalised by the primary boot CPU via
 *       setup_boot_cpu_capabilities().
 *    b) Everything except (a) - run via setup_system_capabilities().
 *
 * 3) Verification: When a CPU is brought online (e.g., by the user or by
 *    the kernel), the kernel should make sure that it is safe to use the
 *    CPU, by verifying that the CPU is compliant with the state of the
 *    capabilities finalised already. This happens via:
 *
 *	secondary_start_kernel() -> check_local_cpu_capabilities()
 *
 *    As explained in (2) above, capabilities could be finalised at
 *    different points in the execution. Each newly booted CPU is verified
 *    against the capabilities that have been finalised by the time it
 *    boots.
 *
 *    a) SCOPE_BOOT_CPU: All CPUs are verified against the capability
 *       except for the primary boot CPU.
 *
 *    b) SCOPE_LOCAL_CPU, SCOPE_SYSTEM: All CPUs hotplugged on by the
 *       user after the kernel boot are verified against the capability.
 *
 *    If there is a conflict, the kernel takes an action based on the
 *    severity (e.g., a CPU could be prevented from booting or cause a
 *    kernel panic). The CPU is allowed to "affect" the state of the
 *    capability if it has not been finalised already. See section 5
 *    for more details on conflicts.
 *
 * 4) Action: As mentioned in (2), the kernel can take an action for each
 *    detected capability, on all CPUs on the system. Appropriate actions
 *    include turning on an architectural feature, modifying the control
 *    registers (e.g., SCTLR, TCR, etc.) or patching the kernel via
 *    alternatives. The kernel patching is batched and performed at a
 *    later point. The actions are always initiated only after the
 *    capability is finalised. This is usually denoted by "enabling" the
 *    capability. The actions are initiated as follows:
 *    a) The action is triggered on all online CPUs, after the capability
 *       is finalised, invoked within the stop_machine() context from
 *       enable_cpu_capabilities().
 *
 *    b) For any late CPU brought up after (1), the action is triggered via:
 *
 *	check_local_cpu_capabilities() -> verify_local_cpu_capabilities()
 *
 * 5) Conflicts: Based on the state of the capability on a late CPU vs.
 *    the system state, we could have the following combinations:
 *
 *		x-----------------------------x
 *		| Type  | System   | Late CPU |
 *		|-----------------------------|
 *		|   a   |    y     |    n     |
 *		|-----------------------------|
 *		|   b   |    n     |    y     |
 *		x-----------------------------x
 *
 *     Two separate flag bits are defined to indicate whether each kind of
 *     conflict can be allowed:
 *		ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU - Case (a) is allowed
 *		ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU - Case (b) is allowed
 *
 *     Case (a) is not permitted for a capability that the system requires
 *     all CPUs to have in order for the capability to be enabled. This is
 *     typical for capabilities that represent enhanced functionality.
 *
 *     Case (b) is not permitted for a capability that must be enabled
 *     during boot if any CPU in the system requires it in order to run
 *     safely. This is typical for erratum workarounds that cannot be
 *     enabled after the corresponding capability is finalised.
 *
 *     In some non-typical cases, either both (a) and (b), or neither,
 *     should be permitted. This can be described by including neither
 *     or both flags in the capability's type field.
 *
 *     In case of a conflict, the CPU is prevented from booting. If the
 *     ARM64_CPUCAP_PANIC_ON_CONFLICT flag is specified for the capability,
 *     then a kernel panic is triggered.
 */


/*
 * Decide how the capability is detected.
 * On any local CPU vs System wide vs the primary boot CPU
 */
#define ARM64_CPUCAP_SCOPE_LOCAL_CPU		((u16)BIT(0))
#define ARM64_CPUCAP_SCOPE_SYSTEM		((u16)BIT(1))
/*
 * The capability is detected on the Boot CPU and is used by the kernel
 * during early boot, i.e., the capability should be "detected" and
 * "enabled" as early as possible on all booting CPUs.
 */
#define ARM64_CPUCAP_SCOPE_BOOT_CPU		((u16)BIT(2))
#define ARM64_CPUCAP_SCOPE_MASK			\
	(ARM64_CPUCAP_SCOPE_SYSTEM	| \
	 ARM64_CPUCAP_SCOPE_LOCAL_CPU	| \
	 ARM64_CPUCAP_SCOPE_BOOT_CPU)

#define SCOPE_SYSTEM		ARM64_CPUCAP_SCOPE_SYSTEM
#define SCOPE_LOCAL_CPU		ARM64_CPUCAP_SCOPE_LOCAL_CPU
#define SCOPE_BOOT_CPU		ARM64_CPUCAP_SCOPE_BOOT_CPU
#define SCOPE_ALL		ARM64_CPUCAP_SCOPE_MASK

/*
 * Is it permitted for a late CPU to have this capability when the
 * system hasn't already enabled it?
 */
#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU	((u16)BIT(4))
/* Is it safe for a late CPU to miss this capability when the system has it */
#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	((u16)BIT(5))
/* Panic when a conflict is detected */
#define ARM64_CPUCAP_PANIC_ON_CONFLICT		((u16)BIT(6))
/*
 * When paired with SCOPE_LOCAL_CPU, all early CPUs must satisfy the
 * condition. This is different from SCOPE_SYSTEM, where the check is performed
 * only once at the end of the SMP boot on the sanitised ID registers.
 * SCOPE_SYSTEM is not suitable for cases where the capability depends on
 * properties local to a CPU, like MIDR_EL1.
 */
#define ARM64_CPUCAP_MATCH_ALL_EARLY_CPUS	((u16)BIT(7))

/*
 * CPU errata workarounds that need to be enabled at boot time if one or
 * more CPUs in the system require it. When one of these capabilities
 * has been enabled, it is safe to allow any CPU to boot that doesn't
 * require the workaround. However, it is not safe if a "late" CPU
 * requires a workaround and the system hasn't enabled it already.
 */
#define ARM64_CPUCAP_LOCAL_CPU_ERRATUM		\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
/*
 * CPU feature detected at boot time based on the system-wide value of a
 * feature. It is safe for a late CPU to have this feature even though
 * the system hasn't enabled it, although the feature will not be used
 * by Linux in this case. If the system has enabled this feature already,
 * then every late CPU must have it.
 */
#define ARM64_CPUCAP_SYSTEM_FEATURE		\
	(ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
/*
 * CPU feature detected at boot time based on a feature of one or more CPUs.
 * All possible conflicts for a late CPU are ignored.
 * NOTE: this means that a late CPU with the feature will *not* cause the
 * capability to be advertised by cpus_have_*cap()!
 */
#define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE	\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU		| \
	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	| \
	 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
/*
 * CPU feature detected at boot time and present on all early CPUs. Late CPUs
 * are permitted to have the feature even if it hasn't been enabled, although
 * the feature will not be used by Linux in this case. If all early CPUs have
 * the feature, then every late CPU must have it.
 */
#define ARM64_CPUCAP_EARLY_LOCAL_CPU_FEATURE	\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU		| \
	 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU	| \
	 ARM64_CPUCAP_MATCH_ALL_EARLY_CPUS)

/*
 * CPU feature detected at boot time, on one or more CPUs. A late CPU
 * is not allowed to have the capability when the system doesn't have it.
 * It is OK for a late CPU to miss the feature.
 */
#define ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE	\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU		| \
	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)

/*
 * CPU feature used early in the boot based on the boot CPU. All secondary
 * CPUs must match the state of the capability as detected by the boot CPU. In
 * case of a conflict, a kernel panic is triggered.
 */
#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE	\
	(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT)

/*
 * CPU feature used early in the boot based on the boot CPU. It is safe for a
 * late CPU to have this feature even though the boot CPU hasn't enabled it,
 * although the feature will not be used by Linux in this case. If the boot CPU
 * has enabled this feature already, then every late CPU must have it.
 */
#define ARM64_CPUCAP_BOOT_CPU_FEATURE		\
	(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)

struct arm64_cpu_capabilities {
	const char *desc;
	u16 capability;
	u16 type;
	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
	/*
	 * Take the appropriate actions to configure this capability
	 * for this CPU. If the capability is detected by the kernel,
	 * this will be called on all the CPUs in the system,
	 * including the hotplugged CPUs, regardless of whether the
	 * capability is available on that specific CPU. This is
	 * useful for some capabilities (e.g., working around CPU
	 * errata), where all the CPUs must take some action (e.g.,
	 * changing system control/configuration). Thus, if an action
	 * is required only if the CPU has the capability, then the
	 * routine must check it before taking any action.
	 */
	void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
	union {
		struct {	/* To be used for erratum handling only */
			struct midr_range midr_range;
			const struct arm64_midr_revidr {
				u32 midr_rv;		/* revision/variant */
				u32 revidr_mask;
			} * const fixed_revs;
		};

		const struct midr_range *midr_range_list;
		struct {	/* Feature register checking */
			u32 sys_reg;
			u8 field_pos;
			u8 field_width;
			u8 min_field_value;
			u8 max_field_value;
			u8 hwcap_type;
			bool sign;
			unsigned long hwcap;
		};
	};

	/*
	 * An optional list of "matches/cpu_enable" pairs for the same
	 * "capability" of the same "type" as described by the parent.
	 * Only matches(), cpu_enable() and fields relevant to these
	 * methods are significant in the list. The cpu_enable() is
	 * invoked only if the corresponding entry "matches()".
	 * However, if a cpu_enable() method is associated
	 * with multiple matches(), care should be taken that either
	 * the match criteria are mutually exclusive, or that the
	 * method is robust against being called multiple times.
	 */
	const struct arm64_cpu_capabilities *match_list;
	const struct cpumask *cpus;
};
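
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * real capability tables live in arch/arm64/kernel/cpufeature.c, but a
 * hypothetical system-wide feature entry could be filled in roughly like
 * this, using only the fields and macros defined above. The capability
 * number, matches() helper and cpu_enable() hook are made-up names.
 *
 *	static const struct arm64_cpu_capabilities example_cap = {
 *		.desc			= "Example feature",
 *		.capability		= ARM64_EXAMPLE_CAP,
 *		.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
 *		.matches		= has_example_feature,
 *		.sys_reg		= SYS_ID_AA64PFR0_EL1,
 *		.field_pos		= ID_AA64PFR0_EL1_SVE_SHIFT,
 *		.field_width		= 4,
 *		.sign			= FTR_UNSIGNED,
 *		.min_field_value	= 1,
 *		.cpu_enable		= example_cpu_enable,
 *	};
 */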

static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
{
	return cap->type & ARM64_CPUCAP_SCOPE_MASK;
}

static inline bool cpucap_match_all_early_cpus(const struct arm64_cpu_capabilities *cap)
{
	return cap->type & ARM64_CPUCAP_MATCH_ALL_EARLY_CPUS;
}

/*
 * Generic helper for handling capabilities with multiple (match, enable) pairs
 * of callbacks, sharing the same capability bit.
 * Iterate over each entry to see if at least one matches.
 */
static inline bool
cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	const struct arm64_cpu_capabilities *caps;

	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, scope))
			return true;

	return false;
}

static __always_inline bool is_vhe_hyp_code(void)
{
	/* Only defined for code run in VHE hyp context */
	return __is_defined(__KVM_VHE_HYPERVISOR__);
}

static __always_inline bool is_nvhe_hyp_code(void)
{
	/* Only defined for code run in NVHE hyp context */
	return __is_defined(__KVM_NVHE_HYPERVISOR__);
}

static __always_inline bool is_hyp_code(void)
{
	return is_vhe_hyp_code() || is_nvhe_hyp_code();
}

extern DECLARE_BITMAP(system_cpucaps, ARM64_NCAPS);

extern DECLARE_BITMAP(boot_cpucaps, ARM64_NCAPS);

#define for_each_available_cap(cap)		\
	for_each_set_bit(cap, system_cpucaps, ARM64_NCAPS)

bool this_cpu_has_cap(unsigned int cap);
void cpu_set_feature(unsigned int num);
bool cpu_have_feature(unsigned int num);
unsigned long cpu_get_elf_hwcap(void);
unsigned long cpu_get_elf_hwcap2(void);
unsigned long cpu_get_elf_hwcap3(void);

#define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name))
#define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name))

static __always_inline bool boot_capabilities_finalized(void)
{
	return alternative_has_cap_likely(ARM64_ALWAYS_BOOT);
}

static __always_inline bool system_capabilities_finalized(void)
{
	return alternative_has_cap_likely(ARM64_ALWAYS_SYSTEM);
}

/*
 * Test for a capability with a runtime check.
 *
 * Before the capability is detected, this returns false.
 */
static __always_inline bool cpus_have_cap(unsigned int num)
{
	if (__builtin_constant_p(num) && !cpucap_is_possible(num))
		return false;
	if (num >= ARM64_NCAPS)
		return false;
	return arch_test_bit(num, system_cpucaps);
}
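
/*
 * Usage sketch (editorial addition, not part of the original header):
 * callers simply test a capability number defined in <asm/cpucaps.h>, e.g.
 *
 *	if (cpus_have_cap(ARM64_HAS_PAN))
 *		do_something();
 *
 * where do_something() stands in for whatever work depends on the capability.
 */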

/*
 * Test for a capability without a runtime check.
 *
 * Before boot capabilities are finalized, this will BUG().
 * After boot capabilities are finalized, this is patched to avoid a runtime
 * check.
 *
 * @num must be a compile-time constant.
 */
static __always_inline bool cpus_have_final_boot_cap(int num)
{
	if (boot_capabilities_finalized())
		return alternative_has_cap_unlikely(num);
	else
		BUG();
}

/*
 * Test for a capability without a runtime check.
 *
 * Before system capabilities are finalized, this will BUG().
 * After system capabilities are finalized, this is patched to avoid a runtime
 * check.
 *
 * @num must be a compile-time constant.
 */
static __always_inline bool cpus_have_final_cap(int num)
{
	if (system_capabilities_finalized())
		return alternative_has_cap_unlikely(num);
	else
		BUG();
}

static inline int __attribute_const__
cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
{
	return (s64)(features << (64 - width - field)) >> (64 - width);
}

static inline int __attribute_const__
cpuid_feature_extract_signed_field(u64 features, int field)
{
	return cpuid_feature_extract_signed_field_width(features, field, 4);
}

static __always_inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
{
	return (u64)(features << (64 - width - field)) >> (64 - width);
}

static __always_inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field(u64 features, int field)
{
	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
}
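
/*
 * Worked example (editorial addition, not part of the original header):
 * with field = 8 and width = 4, the expression above becomes
 * (features << 52) >> 60, i.e. bits [11:8] of the register shifted down
 * to bit 0, zero-extended for the unsigned variant and sign-extended for
 * the signed variant.
 */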

static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
{
	return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
}

static inline u64 arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg)
{
	return (reg->user_val | (reg->sys_val & reg->user_mask));
}

static inline int __attribute_const__
cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign)
{
	if (WARN_ON_ONCE(!width))
		width = 4;
	return (sign) ?
		cpuid_feature_extract_signed_field_width(features, field, width) :
		cpuid_feature_extract_unsigned_field_width(features, field, width);
}

static inline int __attribute_const__
cpuid_feature_extract_field(u64 features, int field, bool sign)
{
	return cpuid_feature_extract_field_width(features, field, 4, sign);
}

static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
{
	return (s64)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign);
}

static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
{
	return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGEND_SHIFT) == 0x1 ||
		cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT) == 0x1;
}

static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL1_SHIFT);

	return val == ID_AA64PFR0_EL1_EL1_AARCH32;
}

static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL0_SHIFT);

	return val == ID_AA64PFR0_EL1_EL0_AARCH32;
}

static inline bool id_aa64pfr0_sve(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SVE_SHIFT);

	return val > 0;
}

static inline bool id_aa64pfr1_sme(u64 pfr1)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_SME_SHIFT);

	return val > 0;
}

static inline bool id_aa64pfr0_mpam(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_MPAM_SHIFT);

	return val > 0;
}

static inline bool id_aa64pfr1_mte(u64 pfr1)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT);

	return val >= ID_AA64PFR1_EL1_MTE_MTE2;
}

void __init setup_boot_cpu_features(void);
void __init setup_system_features(void);
void __init setup_user_features(void);

void check_local_cpu_capabilities(void);

u64 read_sanitised_ftr_reg(u32 id);
u64 __read_sysreg_by_encoding(u32 sys_id);

static inline bool cpu_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}


static inline bool supports_csv2p3(int scope)
{
	u64 pfr0;
	u8 csv2_val;

	if (scope == SCOPE_LOCAL_CPU)
		pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
	else
		pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
							ID_AA64PFR0_EL1_CSV2_SHIFT);
	return csv2_val == 3;
}

static inline bool supports_clearbhb(int scope)
{
	u64 isar2;

	if (scope == SCOPE_LOCAL_CPU)
		isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
	else
		isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);

	return cpuid_feature_extract_unsigned_field(isar2,
						    ID_AA64ISAR2_EL1_CLRBHB_SHIFT);
}

const struct cpumask *system_32bit_el0_cpumask(void);
const struct cpumask *fallback_32bit_el0_cpumask(void);
DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);

static inline bool system_supports_32bit_el0(void)
{
	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	return static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
	       id_aa64pfr0_32bit_el0(pfr0);
}

static inline bool system_supports_4kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						   ID_AA64MMFR0_EL1_TGRAN4_SHIFT);

	return (val >= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN) &&
	       (val <= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX);
}

static inline bool system_supports_64kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						   ID_AA64MMFR0_EL1_TGRAN64_SHIFT);

	return (val >= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN) &&
	       (val <= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX);
}

static inline bool system_supports_16kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						   ID_AA64MMFR0_EL1_TGRAN16_SHIFT);

	return (val >= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN) &&
	       (val <= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX);
}

static inline bool system_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1));
}

static inline bool system_supports_mixed_endian(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						   ID_AA64MMFR0_EL1_BIGEND_SHIFT);

	return val == 0x1;
}

static __always_inline bool system_supports_fpsimd(void)
{
	return alternative_has_cap_likely(ARM64_HAS_FPSIMD);
}

static inline bool system_uses_hw_pan(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_PAN);
}

static inline bool system_uses_ttbr0_pan(void)
{
	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
	       !system_uses_hw_pan();
}

static __always_inline bool system_supports_sve(void)
{
	return alternative_has_cap_unlikely(ARM64_SVE);
}

static __always_inline bool system_supports_sme(void)
{
	return alternative_has_cap_unlikely(ARM64_SME);
}

static __always_inline bool system_supports_sme2(void)
{
	return alternative_has_cap_unlikely(ARM64_SME2);
}

static __always_inline bool system_supports_fa64(void)
{
	return alternative_has_cap_unlikely(ARM64_SME_FA64);
}

static __always_inline bool system_supports_tpidr2(void)
{
	return system_supports_sme();
}

static __always_inline bool system_supports_fpmr(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_FPMR);
}

static __always_inline bool system_supports_cnp(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_CNP);
}

static inline bool system_supports_address_auth(void)
{
	return cpus_have_final_boot_cap(ARM64_HAS_ADDRESS_AUTH);
}

static inline bool system_supports_generic_auth(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_GENERIC_AUTH);
}

static inline bool system_has_full_ptr_auth(void)
{
	return system_supports_address_auth() && system_supports_generic_auth();
}

static __always_inline bool system_uses_irq_prio_masking(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
}

static inline bool system_supports_mte(void)
{
	return alternative_has_cap_unlikely(ARM64_MTE);
}

static inline bool system_has_prio_mask_debugging(void)
{
	return IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING) &&
	       system_uses_irq_prio_masking();
}

static inline bool system_supports_bti(void)
{
	return cpus_have_final_cap(ARM64_BTI);
}

static inline bool system_supports_bti_kernel(void)
{
	return IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) &&
	       cpus_have_final_boot_cap(ARM64_BTI);
}

static inline bool system_supports_tlb_range(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_TLB_RANGE);
}

static inline bool system_supports_lpa2(void)
{
	return cpus_have_final_cap(ARM64_HAS_LPA2);
}

static inline bool system_supports_poe(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_S1POE);
}

static inline bool system_supports_gcs(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_GCS);
}

static inline bool system_supports_haft(void)
{
	return cpus_have_final_cap(ARM64_HAFT);
}

static __always_inline bool system_supports_mpam(void)
{
	return alternative_has_cap_unlikely(ARM64_MPAM);
}

static __always_inline bool system_supports_mpam_hcr(void)
{
	return alternative_has_cap_unlikely(ARM64_MPAM_HCR);
}

static inline bool system_supports_pmuv3(void)
{
	return cpus_have_final_cap(ARM64_HAS_PMUV3);
}

static inline bool system_supports_bbml2_noabort(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_BBML2_NOABORT);
}

int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
bool try_emulate_mrs(struct pt_regs *regs, u32 isn);

static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
{
	switch (parange) {
	case ID_AA64MMFR0_EL1_PARANGE_32: return 32;
	case ID_AA64MMFR0_EL1_PARANGE_36: return 36;
	case ID_AA64MMFR0_EL1_PARANGE_40: return 40;
	case ID_AA64MMFR0_EL1_PARANGE_42: return 42;
	case ID_AA64MMFR0_EL1_PARANGE_44: return 44;
	case ID_AA64MMFR0_EL1_PARANGE_48: return 48;
	case ID_AA64MMFR0_EL1_PARANGE_52: return 52;
	/*
	 * A future PE could use a value unknown to the kernel.
	 * However, by the "D10.1.4 Principles of the ID scheme
	 * for fields in ID registers", ARM DDI 0487C.a, any new
	 * value is guaranteed to be higher than what we know already.
	 * As a safe limit, we return the limit supported by the kernel.
	 */
	default: return CONFIG_ARM64_PA_BITS;
	}
}

/* Check whether hardware update of the Access flag is supported */
static inline bool cpu_has_hw_af(void)
{
	u64 mmfr1;

	if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM))
		return false;

	/*
	 * Use cached version to avoid emulated msr operation on KVM
	 * guests.
	 */
	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	return cpuid_feature_extract_unsigned_field(mmfr1,
						    ID_AA64MMFR1_EL1_HAFDBS_SHIFT);
}

static inline bool cpu_has_pan(void)
{
	u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
	return cpuid_feature_extract_unsigned_field(mmfr1,
						    ID_AA64MMFR1_EL1_PAN_SHIFT);
}

#ifdef CONFIG_ARM64_AMU_EXTN
/* Check whether the cpu supports the Activity Monitors Unit (AMU) */
extern bool cpu_has_amu_feat(int cpu);
#else
static inline bool cpu_has_amu_feat(int cpu)
{
	return false;
}
#endif

/* Get a cpu that supports the Activity Monitors Unit (AMU) */
extern int get_cpu_with_amu_feat(void);

static inline unsigned int get_vmid_bits(u64 mmfr1)
{
	int vmid_bits;

	vmid_bits = cpuid_feature_extract_unsigned_field(mmfr1,
						ID_AA64MMFR1_EL1_VMIDBits_SHIFT);
	if (vmid_bits == ID_AA64MMFR1_EL1_VMIDBits_16)
		return 16;

	/*
	 * Return the default here even if any reserved
	 * value is fetched from the system register.
	 */
	return 8;
}

s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, s64 cur);
struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id);

extern struct arm64_ftr_override id_aa64mmfr0_override;
extern struct arm64_ftr_override id_aa64mmfr1_override;
extern struct arm64_ftr_override id_aa64mmfr2_override;
extern struct arm64_ftr_override id_aa64pfr0_override;
extern struct arm64_ftr_override id_aa64pfr1_override;
extern struct arm64_ftr_override id_aa64zfr0_override;
extern struct arm64_ftr_override id_aa64smfr0_override;
extern struct arm64_ftr_override id_aa64isar1_override;
extern struct arm64_ftr_override id_aa64isar2_override;

extern struct arm64_ftr_override arm64_sw_feature_override;

static inline
u64 arm64_apply_feature_override(u64 val, int feat, int width,
				 const struct arm64_ftr_override *override)
{
	u64 oval = override->val;

	/*
	 * When it encounters an invalid override (e.g., an override that
	 * cannot be honoured due to a missing CPU feature), the early idreg
	 * override code will set the mask to 0x0 and the value to non-zero for
	 * the field in question. In order to determine whether the override is
	 * valid or not for the field we are interested in, we first need to
	 * disregard bits belonging to other fields.
	 */
	oval &= GENMASK_ULL(feat + width - 1, feat);

	/*
	 * The override is valid if all value bits are accounted for in the
	 * mask. If so, replace the masked bits with the override value.
	 */
	if (oval == (oval & override->mask)) {
		val &= ~override->mask;
		val |= oval;
	}

	/* Extract the field from the updated value */
	return cpuid_feature_extract_unsigned_field(val, feat);
}
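
/*
 * Worked example (editorial addition, not part of the original header):
 * for a 4-bit field at feat = 8, an override of mask = 0xf00 and val = 0x100
 * replaces bits [11:8] of @val with 0x1 before the field is extracted, so the
 * function returns 1. With mask = 0 and a non-zero value in those bits, the
 * override is treated as invalid and @val is left untouched.
 */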

static inline bool arm64_test_sw_feature_override(int feat)
{
	/*
	 * Software features are pseudo CPU features that have no underlying
	 * CPUID system register value to apply the override to.
	 */
	return arm64_apply_feature_override(0, feat, 4,
					    &arm64_sw_feature_override);
}

static inline bool kaslr_disabled_cmdline(void)
{
	return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOKASLR);
}

u32 get_kvm_ipa_limit(void);
void dump_cpu_features(void);

static inline bool cpu_has_bti(void)
{
	if (!IS_ENABLED(CONFIG_ARM64_BTI))
		return false;

	return arm64_apply_feature_override(read_cpuid(ID_AA64PFR1_EL1),
					    ID_AA64PFR1_EL1_BT_SHIFT, 4,
					    &id_aa64pfr1_override);
}

static inline bool cpu_has_pac(void)
{
	u64 isar1, isar2;

	if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH))
		return false;

	isar1 = read_cpuid(ID_AA64ISAR1_EL1);
	isar2 = read_cpuid(ID_AA64ISAR2_EL1);

	if (arm64_apply_feature_override(isar1, ID_AA64ISAR1_EL1_APA_SHIFT, 4,
					 &id_aa64isar1_override))
		return true;

	if (arm64_apply_feature_override(isar1, ID_AA64ISAR1_EL1_API_SHIFT, 4,
					 &id_aa64isar1_override))
		return true;

	return arm64_apply_feature_override(isar2, ID_AA64ISAR2_EL1_APA3_SHIFT, 4,
					    &id_aa64isar2_override);
}

static inline bool cpu_has_lva(void)
{
	u64 mmfr2;

	mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
	mmfr2 &= ~id_aa64mmfr2_override.mask;
	mmfr2 |= id_aa64mmfr2_override.val;
	return cpuid_feature_extract_unsigned_field(mmfr2,
						    ID_AA64MMFR2_EL1_VARange_SHIFT);
}

static inline bool cpu_has_lpa2(void)
{
#ifdef CONFIG_ARM64_LPA2
	u64 mmfr0;
	int feat;

	mmfr0 = read_sysreg(id_aa64mmfr0_el1);
	mmfr0 &= ~id_aa64mmfr0_override.mask;
	mmfr0 |= id_aa64mmfr0_override.val;
	feat = cpuid_feature_extract_signed_field(mmfr0,
						  ID_AA64MMFR0_EL1_TGRAN_SHIFT);

	return feat >= ID_AA64MMFR0_EL1_TGRAN_LPA2;
#else
	return false;
#endif
}

#endif /* __ASSEMBLY__ */

#endif /* __ASM_CPUFEATURE_H */