#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>
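
/*
 * Allocator state: the ASID bitmap, the current generation and per-CPU
 * bookkeeping. cpu_asid_lock protects the slow path; the fast path relies
 * on atomics only.
 */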
static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

static unsigned long max_pinned_asids;
static unsigned long nr_pinned_asids;
static unsigned long *pinned_asid_map;
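
/*
 * A context ID carries the rollover generation in its upper bits and the
 * hardware ASID in its low asid_bits bits.
 */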
#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)
#define NUM_USER_ASIDS		(1UL << asid_bits)
#define ctxid2asid(asid)	((asid) & ~ASID_MASK)
#define asid2ctxid(asid, genid)	((asid) | (genid))
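
/* Get the ASIDBits supported by the current CPU. */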
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);

	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
			smp_processor_id(), fld);
		fallthrough;
	case ID_AA64MMFR0_EL1_ASIDBITS_8:
		asid = 8;
		break;
	case ID_AA64MMFR0_EL1_ASIDBITS_16:
		asid = 16;
	}

	return asid;
}
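
/* Check that this CPU supports at least as many ASID bits as the boot CPU. */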
void verify_cpu_asid_bits(void)
{
	u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot reduce the ASID size at runtime, so panic if
		 * this CPU supports fewer ASID bits than the boot CPU.
		 */
		pr_crit("CPU%d: smaller ASID size(%u) than boot CPU (%u)\n",
			smp_processor_id(), asid, asid_bits);
		cpu_panic_kernel();
	}
}
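
/*
 * With KPTI, ASIDs are allocated in even/odd pairs: the even value is
 * handed out here and the odd one is reserved for userspace running with
 * the kernel unmapped. Writing 0xaa (0b10101010) marks every odd ASID as
 * permanently in use.
 */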
static void set_kpti_asid_bits(unsigned long *map)
{
	unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);

	/* 0xaa is 0b10101010: set every odd-numbered ASID as in use. */
	memset(map, 0xaa, len);
}
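
/* Reinitialise the map on rollover, preserving pinned and KPTI ASIDs. */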
static void set_reserved_asid_bits(void)
{
	if (pinned_asid_map)
		bitmap_copy(asid_map, pinned_asid_map, NUM_USER_ASIDS);
	else if (arm64_kernel_unmapped_at_el0())
		set_kpti_asid_bits(asid_map);
	else
		bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
}
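
/* True if the context ID carries the current generation in its upper bits. */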
#define asid_gen_match(asid) \
	(!(((asid) ^ atomic64_read(&asid_generation)) >> asid_bits))
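
/* Called on rollover, with cpu_asid_lock held. */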
static void flush_context(void)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	set_reserved_asid_bits();

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a rollover, but hasn't
		 * run another task since, preserve its reserved ASID: it is
		 * the only trace of the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(ctxid2asid(asid), asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a local TLB flush on every CPU before it next switches. */
	cpumask_setall(&tlb_flush_pending);
}
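
/*
 * Update any reserved copies of asid to newasid so that a CPU which kept
 * running across the rollover can continue to use its ASID.
 */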
static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Don't exit the loop early: every per-CPU copy of the old ASID
	 * must be updated, or we could miss a reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}
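
/* Allocate a new context ID for mm. Must be called with cpu_asid_lock held. */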
static u64 new_context(struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = asid2ctxid(ctxid2asid(asid), generation);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/* A pinned ASID is also kept across rollover. */
		if (refcount_read(&mm->context.pinned))
			return newasid;

		/* We had a valid ASID in a previous life; try to re-use it. */
		if (!__test_and_set_bit(ctxid2asid(asid), asid_map))
			return newasid;
	}

	/* Allocate a free ASID; ASID #0 is reserved for init_mm. */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so bump the generation and roll over. */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context();

	/* We have more ASIDs than CPUs, so this will always succeed. */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return asid2ctxid(asid, generation);
}
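
/*
 * Switch to mm on this CPU, allocating a fresh ASID if the current one is
 * stale. The common case avoids cpu_asid_lock entirely.
 */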
void check_and_switch_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int cpu;
	u64 asid, old_active_asid;

	if (system_supports_cnp())
		cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle: the relaxed cmpxchg below
	 * races with a concurrent rollover. Either we get zero back and
	 * serialise on cpu_asid_lock (forcing us to see the updated
	 * generation), or we get a valid ASID back, in which case the
	 * relaxed xchg in flush_context() observes us and treats this
	 * ASID as reserved, because atomic RmWs are totally ordered for
	 * a given location.
	 */
	old_active_asid = atomic64_read(this_cpu_ptr(&active_asids));
	if (old_active_asid && asid_gen_match(asid) &&
	    atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_asids),
				     old_active_asid, asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if (!asid_gen_match(asid)) {
		asid = new_context(mm);
		atomic64_set(&mm->context.id, asid);
	}

	cpu = smp_processor_id();
	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(this_cpu_ptr(&active_asids), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	arm64_apply_bp_hardening();

	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}
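
/*
 * Pin the ASID of mm so that it survives rollover, e.g. for page tables
 * shared with a device. Returns the hardware ASID, or 0 if pinning failed.
 */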
unsigned long arm64_mm_context_get(struct mm_struct *mm)
{
	unsigned long flags;
	u64 asid;

	if (!pinned_asid_map)
		return 0;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);

	asid = atomic64_read(&mm->context.id);

	if (refcount_inc_not_zero(&mm->context.pinned))
		goto out_unlock;

	if (nr_pinned_asids >= max_pinned_asids) {
		asid = 0;
		goto out_unlock;
	}

	if (!asid_gen_match(asid)) {
		/*
		 * We went through one or more rollover since that ASID was
		 * used. Ensure that it is still valid, or generate a new one.
		 */
		asid = new_context(mm);
		atomic64_set(&mm->context.id, asid);
	}

	nr_pinned_asids++;
	__set_bit(ctxid2asid(asid), pinned_asid_map);
	refcount_set(&mm->context.pinned, 1);

out_unlock:
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

	asid = ctxid2asid(asid);

	/* Set the equivalent of USER_ASID_BIT */
	if (asid && arm64_kernel_unmapped_at_el0())
		asid |= 1;

	return asid;
}
EXPORT_SYMBOL_GPL(arm64_mm_context_get);
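
/* Release a pin taken with arm64_mm_context_get(). */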
void arm64_mm_context_put(struct mm_struct *mm)
{
	unsigned long flags;
	u64 asid = atomic64_read(&mm->context.id);

	if (!pinned_asid_map)
		return;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);

	if (refcount_dec_and_test(&mm->context.pinned)) {
		__clear_bit(ctxid2asid(asid), pinned_asid_map);
		nr_pinned_asids--;
	}

	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
EXPORT_SYMBOL_GPL(arm64_mm_context_put);
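
/* Errata workaround post ttbrx_el1 update. */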
asmlinkage void post_ttbr_update_workaround(void)
{
	if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456))
		return;

	asm(ALTERNATIVE("nop; nop; nop",
			"ic iallu; dsb nsh; isb",
			ARM64_WORKAROUND_CAVIUM_27456));
}
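
/* Install the new mm: TTBR0 points at its pgd and TTBR1 carries its ASID. */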
void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
{
	unsigned long ttbr1 = read_sysreg(ttbr1_el1);
	unsigned long asid = ASID(mm);
	unsigned long ttbr0 = phys_to_ttbr(pgd_phys);

	/* Skip CNP for the reserved ASID */
	if (system_supports_cnp() && asid)
		ttbr0 |= TTBR_CNP_BIT;

	/* SW PAN needs a copy of the ASID in TTBR0 for entry */
	if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN))
		ttbr0 |= FIELD_PREP(TTBR_ASID_MASK, asid);

	/* Set ASID in TTBR1 since TCR.A1 is set */
	ttbr1 &= ~TTBR_ASID_MASK;
	ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid);

	cpu_set_reserved_ttbr0_nosync();
	write_sysreg(ttbr1, ttbr1_el1);
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	post_ttbr_update_workaround();
}
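
/*
 * Finish setting up the ASID limits once all CPUs are up and the system
 * capabilities (including KPTI) are known.
 */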
static int asids_update_limit(void)
{
	unsigned long num_available_asids = NUM_USER_ASIDS;

	if (arm64_kernel_unmapped_at_el0()) {
		num_available_asids /= 2;
		if (pinned_asid_map)
			set_kpti_asid_bits(pinned_asid_map);
	}
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
	 */
	WARN_ON(num_available_asids - 1 <= num_possible_cpus());
	pr_info("ASID allocator initialised with %lu entries\n",
		num_available_asids);

	/*
	 * There must always be an ASID available after rollover: even if all
	 * CPUs have a reserved ASID and the maximum number of ASIDs are
	 * pinned, leave at least one empty slot in the ASID map.
	 */
	max_pinned_asids = num_available_asids - num_possible_cpus() - 2;
	return 0;
}
arch_initcall(asids_update_limit);
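
/* Allocate the ASID bitmaps early, before the first context switch. */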
static int asids_init(void)
{
	asid_bits = get_cpu_asid_bits();
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	pinned_asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
	nr_pinned_asids = 0;

	/*
	 * We cannot call set_kpti_asid_bits() lazily here because CPU caps
	 * are not finalised yet, so it is safer to assume KPTI and reserve
	 * the kernel ASIDs from the beginning.
	 */
	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
		set_kpti_asid_bits(asid_map);
	return 0;
}
early_initcall(asids_init);