// SPDX-License-Identifier: GPL-2.0-only
/*
 * TLB Management (flush/create/diagnostics) for MMUv3 and MMUv4
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 */

#include <linux/module.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#include <asm/arcregs.h>
#include <asm/setup.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>

/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;

static struct cpuinfo_arc_mmu {
	unsigned int ver, pg_sz_k, s_pg_sz_m, pae, sets, ways;
} mmuinfo;

/*
 * Utility routine to erase a J-TLB entry
 * Caller needs to set up the Index Reg (manually or via getIndex)
 */
static inline void __tlb_entry_erase(void)
{
	write_aux_reg(ARC_REG_TLBPD1, 0);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);

	write_aux_reg(ARC_REG_TLBPD0, 0);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

static void utlb_invalidate(void)
{
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
}

#ifdef CONFIG_ARC_MMU_V3

static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
{
	unsigned int idx;

	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	return idx;
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	unsigned int idx;

	/* Locate the TLB entry for this vaddr + ASID */
	idx = tlb_entry_lkup(vaddr_n_asid);

	/* No error means entry found, zero it out */
	if (likely(!(idx & TLB_LKUP_ERR))) {
		__tlb_entry_erase();
	} else {
		/* Duplicate entry error */
		WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
		     vaddr_n_asid);
	}
}

static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
{
	unsigned int idx;

	/*
	 * First verify if an entry for this vaddr+ASID already exists
	 * This also sets up PD0 (vaddr, ASID..) for the final commit
	 */
	idx = tlb_entry_lkup(pd0);

	/*
	 * If not already present, get a free slot from the MMU.
	 * Otherwise, the Probe has located the entry and set the INDEX Reg
	 * to its existing location. This will cause the Write CMD to
	 * over-write the existing entry with the new PD0 and PD1
	 */
	if (likely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

	/* setup the other half of TLB entry (pfn, rwx..) */
	write_aux_reg(ARC_REG_TLBPD1, pd1);

	/*
	 * Commit the Entry to MMU
	 * It doesn't sound safe to use the TLBWriteNI cmd here,
	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
	 */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

#else	/* MMUv4 */

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
}

static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
{
	write_aux_reg(ARC_REG_TLBPD0, pd0);

	if (!is_pae40_enabled()) {
		write_aux_reg(ARC_REG_TLBPD1, pd1);
	} else {
		write_aux_reg(ARC_REG_TLBPD1, pd1 & 0xFFFFFFFF);
		write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32);
	}

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
}

#endif
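
/*
 * Either flavour above provides the same two primitives used by the flush
 * and create routines below:
 *  - tlb_entry_erase(vaddr | asid)
 *  - tlb_entry_insert(pd0, pd1)
 * MMUv3 has to probe for the entry (and fetch a free index) explicitly,
 * whereas MMUv4 has single TLBInsertEntry/TLBDeleteEntry commands which do
 * the lookup in hardware.
 */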

/*
 * Unconditionally (without lookup) erase the entire MMU contents
 */

noinline void local_flush_tlb_all(void)
{
	struct cpuinfo_arc_mmu *mmu = &mmuinfo;
	unsigned long flags;
	unsigned int entry;
	int num_tlb = mmu->sets * mmu->ways;

	local_irq_save(flags);

	/* Load PD0 and PD1 with template for a Blank Entry */
	write_aux_reg(ARC_REG_TLBPD1, 0);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);

	write_aux_reg(ARC_REG_TLBPD0, 0);

	for (entry = 0; entry < num_tlb; entry++) {
		/* write this entry to the TLB */
		write_aux_reg(ARC_REG_TLBINDEX, entry);
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
		const int stlb_idx = 0x800;

		/* Blank sTLB entry */
		write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);

		for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
			write_aux_reg(ARC_REG_TLBINDEX, entry);
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
		}
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/*
 * Flush the entire MM for userland. The fastest way is to move to the next ASID
 */
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Small optimisation courtesy IA64:
	 * flush_mm is called during fork, exit, munmap etc., multiple times
	 * as well. Only for fork() do we need to move the parent to a new MMU
	 * ctxt; all other cases are NOPs, hence this check.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	/*
	 * - Move to a new ASID, but only if the mm is still wired in
	 *   (Android Binder ended up calling this for vma->mm != tsk->mm,
	 *   causing the h/w and s/w ASID to get out of sync)
	 * - Also the new get_new_mmu_context() implementation allocates a new
	 *   ASID only if one is not allocated already - so unallocate first
	 */
	destroy_context(mm);
	if (current->mm == mm)
		get_new_mmu_context(mm);
}

/*
 * Flush a Range of TLB entries for userland.
 * @start is inclusive, while @end is exclusive.
 * Difference between this and the Kernel Range Flush is
 *  - Here the fastest way (if the range is too large) is to move to the next
 *    ASID without doing any explicit shootdown.
 *  - In case of a kernel flush, the entry has to be shot down explicitly.
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* If the range @start to @end is more than 32 TLB entries deep,
	 * it's better to move to a new ASID rather than searching for
	 * individual entries and then shooting them down.
	 *
	 * The calc above is rough and doesn't account for unaligned parts,
	 * since this is heuristics based anyways.
	 */
	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_mm(vma->vm_mm);
		return;
	}

	/*
	 * @start moved to page start: this alone suffices for checking the
	 * loop end condition below, w/o need for aligning @end to a page end,
	 * e.g. 2000 to 4001 will anyhow loop twice
	 */
	start &= PAGE_MASK;

	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		while (start < end) {
			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
			start += PAGE_SIZE;
		}
	}

	local_irq_restore(flags);
}
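
/*
 * A worked example of the "32 entries" cutoff above, assuming the typical
 * 8K MMU page size mentioned in the THP notes further down: 32 * 8K = 256K.
 * So unmapping up to 256K is torn down entry by entry, while anything larger
 * simply moves the mm to a fresh ASID via local_flush_tlb_mm().
 */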

/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
 *  @start, @end interpreted as kvaddr
 * Interestingly, shared TLB entries can also be flushed using just
 * @start, @end alone (interpreted as user vaddr), although technically the
 * SASID is also needed. However our smart TLBProbe lookup takes care of that.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	/* exactly same as above, except for TLB entry not taking ASID */

	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_all();
		return;
	}

	start &= PAGE_MASK;

	local_irq_save(flags);
	while (start < end) {
		tlb_entry_erase(start);
		start += PAGE_SIZE;
	}

	local_irq_restore(flags);
}

/*
 * Delete the TLB entry in the MMU for a given page (user address)
 * NOTE: one TLB entry contains the translation for a single PAGE
 */

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* Note that it is critical that interrupts are DISABLED between
	 * checking the ASID and using it to flush the TLB entry
	 */
	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_SMP

struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ipi_flush_pmd_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}
#endif

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
	on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
			 mm, 1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = uaddr
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
}
#endif

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta = {
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}
#endif

/*
 * Routine to create a TLB entry
 */
static void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
{
	unsigned long flags;
	unsigned int asid_or_sasid, rwx;
	unsigned long pd0;
	phys_addr_t pd1;

	/*
	 * create_tlb() assumes that current->mm == vma->mm, since
	 * - the ASID for the TLB entry is fetched from the MMU ASID reg
	 *   (valid for curr)
	 * - it completes the lazy write to the SASID reg (again valid for
	 *   curr tsk)
	 *
	 * Removing the assumption involves
	 * - Using vma->mm->context{ASID,SASID}, as opposed to the MMU reg.
	 * - More importantly it makes this handler inconsistent with the
	 *   fast-path TLB Refill handler which always deals with "current"
	 *
	 * Let's see the use cases when current->mm != vma->mm and we land here
	 *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
	 *     Here the VM wants to pre-install a TLB entry for the user stack
	 *     while current->mm still points to the pre-execve mm (hence the
	 *     condition). However the stack vaddr is soon relocated
	 *     (randomization) and move_page_tables() tries to undo that TLB
	 *     entry. Thus not creating the TLB entry is not any worse.
	 *
	 *  2. ptrace(POKETEXT) causes a CoW - the debugger (current) inserting
	 *     a breakpoint in the debugged task. Not creating a TLB entry now
	 *     is not performance critical.
	 *
	 * Both the cases above are not good enough for code churn.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	vaddr &= PAGE_MASK;

	/* update this PTE credentials */
	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);

	/* Create HW TLB(PD0,PD1) from PTE */

	/* ASID for this task */
	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;

	pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);

	/*
	 * ARC MMU provides fully orthogonal access bits for K/U mode,
	 * however Linux only saves 1 set to save PTE real-estate.
	 * Here we convert 3 PTE bits into 6 MMU bits:
	 * - Kernel only entries have Kr Kw Kx 0 0 0
	 * - User entries have mirrored K and U bits
	 */
	rwx = pte_val(*ptep) & PTE_BITS_RWX;

	if (pte_val(*ptep) & _PAGE_GLOBAL)
		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
	else
		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */

	pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);

	tlb_entry_insert(pd0, pd1);

	local_irq_restore(flags);
}
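
/*
 * Illustration of the rwx expansion in create_tlb() above (the bit values
 * are hypothetical, only the mirroring matters):
 *   global/kernel PTE  r=1 w=1 x=0  =>  Kr=1 Kw=1 Kx=0 Ur=0 Uw=0 Ux=0
 *   user PTE           r=1 w=0 x=1  =>  Kr=1 Kw=0 Kx=1 Ur=1 Uw=0 Ux=1
 */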

/*
 * Called at the end of pagefault, for a userspace mapped page
 *  - pre-install the corresponding TLB entry into the MMU
 *  - Finalize the delayed D-cache flush of the kernel mapping of the page
 *    due to flush_dcache_page(), copy_user_page()
 *
 * Note that flush (when done) involves both WBACK - so the physical page is
 * in sync - as well as INV - so any non-congruent aliases don't remain
 */
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
		unsigned long vaddr_unaligned, pte_t *ptep, unsigned int nr)
{
	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
	struct page *page = pfn_to_page(pte_pfn(*ptep));

	create_tlb(vma, vaddr, ptep);

	if (page == ZERO_PAGE(0))
		return;

	/*
	 * For executable pages, since the icache doesn't snoop the dcache,
	 * any dirty K-mapping of a code page needs to be wback+inv so that
	 * an icache fetch by userspace sees the code correctly.
	 */
	if (vma->vm_flags & VM_EXEC) {
		struct folio *folio = page_folio(page);
		int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);

		if (dirty) {
			unsigned long offset = offset_in_folio(folio, paddr);

			nr = folio_nr_pages(folio);
			paddr -= offset;
			vaddr -= offset;

			/* wback + inv dcache lines (K-mapping) */
			__flush_dcache_pages(paddr, paddr, nr);

			/* invalidate any existing icache lines (U-mapping) */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_pages(paddr, vaddr, nr);
		}
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * MMUv4 in HS38x cores supports Super Pages which are the basis for Linux THP
 * support.
 *
 * Normal and Super pages can co-exist (of course not overlap) in the TLB, with
 * a new bit "SZ" in the TLB page descriptor to distinguish between them.
 * The Super Page size is configurable in hardware (4K to 16M), but fixed once
 * the RTL is built.
 *
 * The exact THP size a Linux configuration will support is a function of:
 *  - MMU page size (typical 8K, RTL fixed)
 *  - software page walker address split between PGD:PTE:PFN (typical
 *    11:8:13, but can be changed with 1 line)
 * So for the above default, the THP size supported is 8K * (2^8) = 2M
 *
 * The default Page Walker is 2 levels, PGD:PTE:PFN, which in the THP regime
 * reduces to 1 level (as the PTE is folded into the PGD and canonically
 * referred to as PMD).
 * Thus THP PMD accessors are implemented in terms of PTE (just like sparc)
 */

void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	pte_t pte = __pte(pmd_val(*pmd));

	update_mmu_cache_range(NULL, vma, addr, &pte, HPAGE_PMD_NR);
}

void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			       unsigned long end)
{
	unsigned int cpu;
	unsigned long flags;

	local_irq_save(flags);

	cpu = smp_processor_id();

	if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
		unsigned int asid = hw_pid(vma->vm_mm, cpu);

		/* No need to loop here: this will always be for 1 Huge Page */
		tlb_entry_erase(start | _PAGE_HW_SZ | asid);
	}

	local_irq_restore(flags);
}

#endif

/* Read the MMU Build Configuration Register, decode it and save it into
 * the cpuinfo structure for later use.
 * No validation is done here, simply read/convert the BCR
 */
int arc_mmu_mumbojumbo(int c, char *buf, int len)
{
	struct cpuinfo_arc_mmu *mmu = &mmuinfo;
	unsigned int bcr, u_dtlb, u_itlb, sasid;
	struct bcr_mmu_3 *mmu3;
	struct bcr_mmu_4 *mmu4;
	char super_pg[64] = "";
	int n = 0;

	bcr = read_aux_reg(ARC_REG_MMU_BCR);
	mmu->ver = (bcr >> 24);

	if (is_isa_arcompact() && mmu->ver == 3) {
		mmu3 = (struct bcr_mmu_3 *)&bcr;
		mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
		mmu->sets = 1 << mmu3->sets;
		mmu->ways = 1 << mmu3->ways;
		u_dtlb = mmu3->u_dtlb;
		u_itlb = mmu3->u_itlb;
		sasid = mmu3->sasid;
	} else {
		mmu4 = (struct bcr_mmu_4 *)&bcr;
		mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
		mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
		mmu->sets = 64 << mmu4->n_entry;
		mmu->ways = mmu4->n_ways * 2;
		u_dtlb = mmu4->u_dtlb * 4;
		u_itlb = mmu4->u_itlb * 4;
		sasid = mmu4->sasid;
		mmu->pae = mmu4->pae;
	}

	if (mmu->s_pg_sz_m)
		scnprintf(super_pg, 64, "/%dM%s",
			  mmu->s_pg_sz_m,
			  IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) ? " (THP enabled)" : "");

	n += scnprintf(buf + n, len - n,
		       "MMU [v%x]\t: %dk%s, swalk %d lvl, JTLB %dx%d, uDTLB %d, uITLB %d%s%s%s\n",
		       mmu->ver, mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS,
		       mmu->sets, mmu->ways,
		       u_dtlb, u_itlb,
		       IS_AVAIL1(sasid, ", SASID"),
		       IS_AVAIL2(mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));

	return n;
}

int pae40_exist_but_not_enab(void)
{
	return mmuinfo.pae && !is_pae40_enabled();
}

void arc_mmu_init(void)
{
	struct cpuinfo_arc_mmu *mmu = &mmuinfo;
	int compat = 0;

	/*
	 * Can't be done in processor.h due to header include dependencies
	 */
	BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));

	/*
	 * stack top size sanity check;
	 * Can't be done in processor.h due to header include dependencies
	 */
	BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));

	/*
	 * Ensure that MMU features assumed by the kernel exist in hardware.
	 *  - For older ARC700 cpus, only v3 is supported
	 *  - For HS cpus, v4 was the baseline and v5 is backwards compatible
	 *    (will run older software).
	 */
	if (is_isa_arcompact() && mmu->ver == 3)
		compat = 1;
	else if (is_isa_arcv2() && mmu->ver >= 4)
		compat = 1;

	if (!compat)
		panic("MMU ver %d doesn't match kernel built for\n", mmu->ver);

	if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
		panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
		panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
		      (unsigned long)TO_MB(HPAGE_PMD_SIZE));

	if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
		panic("Hardware doesn't support PAE40\n");

	/* Enable the MMU with ASID 0 */
	mmu_setup_asid(NULL, 0);

	/* cache the pgd pointer in MMU SCRATCH reg (ARCv2 only) */
	mmu_setup_pgd(NULL, swapper_pg_dir);

	if (pae40_exist_but_not_enab())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);
}

/*
 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
 * All the ways of a set occupy consecutive indexes (the way varies fastest):
 *		---------------------	-----------
 *		|way0|way1|way2|way3|	|way0|way1|
 *		---------------------	-----------
 * [set0]	|  0 |  1 |  2 |  3 |	|  0 |  1 |
 * [set1]	|  4 |  5 |  6 |  7 |	|  2 |  3 |
 *		~                   ~	~         ~
 * [set127]	| 508| 509| 510| 511|	| 254| 255|
 *		---------------------	-----------
 * For normal operations we don't (must not) care how the above works since
 * the MMU cmd getIndex(vaddr) abstracts that out.
 * However for walking the WAYS of a SET, we need to know this.
 */
#define SET_WAY_TO_IDX(mmu, set, way)	((set) * mmu->ways + (way))
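
/*
 * e.g. for the 128-set, 4-way column of the table above:
 *	SET_WAY_TO_IDX(mmu, 1, 2) = 1 * 4 + 2 = 6
 * which is exactly the index shown for [set1], way2.
 */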

/* Handling of Duplicate PDs (TLB entries) in the MMU.
 * - Could be due to buggy customer tapeouts or obscure kernel bugs
 * - The MMU complains not at the time of duplicate PD installation, but at
 *   the time of a lookup matching multiple ways.
 * - Ideally these should never happen - but if they do - work around it by
 *   deleting the duplicate one.
 * - Knob to be verbose about it (TODO: hook it up to debugfs)
 */
volatile int dup_pd_silent; /* Be silent about it or complain (default) */

void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
			  struct pt_regs *regs)
{
	struct cpuinfo_arc_mmu *mmu = &mmuinfo;
	unsigned long flags;
	int set, n_ways = mmu->ways;

	n_ways = min(n_ways, 4);
	BUG_ON(mmu->ways > 4);

	local_irq_save(flags);

	/* loop thru all sets of TLB */
	for (set = 0; set < mmu->sets; set++) {

		int is_valid, way;
		unsigned int pd0[4];

		/* read out all the ways of the current set */
		for (way = 0, is_valid = 0; way < n_ways; way++) {
			write_aux_reg(ARC_REG_TLBINDEX,
				      SET_WAY_TO_IDX(mmu, set, way));
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
			pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
			is_valid |= pd0[way] & _PAGE_PRESENT;
			pd0[way] &= PAGE_MASK;
		}

		/* If all the WAYS in the SET are empty, skip to the next SET */
		if (!is_valid)
			continue;

		/* Scan the set for duplicate ways: needs a nested loop */
		for (way = 0; way < n_ways - 1; way++) {

			int n;

			if (!pd0[way])
				continue;

			for (n = way + 1; n < n_ways; n++) {
				if (pd0[way] != pd0[n])
					continue;

				if (!dup_pd_silent)
					pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
						pd0[way], set, way, n);

				/*
				 * clear entry @way and not @n.
				 * This is critical to our optimised loop
				 */
				pd0[way] = 0;
				write_aux_reg(ARC_REG_TLBINDEX,
					      SET_WAY_TO_IDX(mmu, set, way));
				__tlb_entry_erase();
			}
		}
	}

	local_irq_restore(flags);
}