/* Source: arch/powerpc/include/asm/book3s/32/mmu-hash.h */
/* SPDX-License-Identifier: GPL-2.0 */1#ifndef _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_2#define _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_34/*5* 32-bit hash table MMU support6*/78/*9* BATs10*/1112/* Block size masks */13#define BL_128K 0x00014#define BL_256K 0x00115#define BL_512K 0x00316#define BL_1M 0x00717#define BL_2M 0x00F18#define BL_4M 0x01F19#define BL_8M 0x03F20#define BL_16M 0x07F21#define BL_32M 0x0FF22#define BL_64M 0x1FF23#define BL_128M 0x3FF24#define BL_256M 0x7FF2526/* BAT Access Protection */27#define BPP_XX 0x00 /* No access */28#define BPP_RX 0x01 /* Read only */29#define BPP_RW 0x02 /* Read/write */3031#ifndef __ASSEMBLY__32/* Contort a phys_addr_t into the right format/bits for a BAT */33#ifdef CONFIG_PHYS_64BIT34#define BAT_PHYS_ADDR(x) ((u32)((x & 0x00000000fffe0000ULL) | \35((x & 0x0000000e00000000ULL) >> 24) | \36((x & 0x0000000100000000ULL) >> 30)))37#define PHYS_BAT_ADDR(x) (((u64)(x) & 0x00000000fffe0000ULL) | \38(((u64)(x) << 24) & 0x0000000e00000000ULL) | \39(((u64)(x) << 30) & 0x0000000100000000ULL))40#else41#define BAT_PHYS_ADDR(x) (x)42#define PHYS_BAT_ADDR(x) ((x) & 0xfffe0000)43#endif4445struct ppc_bat {46u32 batu;47u32 batl;48};49#endif /* !__ASSEMBLY__ */5051/*52* Hash table53*/5455/* Values for PP (assumes Ks=0, Kp=1) */56#define PP_RWXX 0 /* Supervisor read/write, User none */57#define PP_RWRX 1 /* Supervisor read/write, User read */58#define PP_RWRW 2 /* Supervisor read/write, User read/write */59#define PP_RXRX 3 /* Supervisor read, User read */6061/* Values for Segment Registers */62#define SR_NX 0x10000000 /* No Execute */63#define SR_KP 0x20000000 /* User key */64#define SR_KS 0x40000000 /* Supervisor key */6566#ifdef __ASSEMBLY__6768#include <asm/asm-offsets.h>6970.macro uus_addi sr reg1 reg2 imm71.if NUM_USER_SEGMENTS > \sr72addi \reg1,\reg2,\imm73.endif74.endm7576.macro uus_mtsr sr reg177.if NUM_USER_SEGMENTS > \sr78mtsr \sr, \reg179.endif80.endm8182/*83* This isync() shouldn't be necessary as the kernel is not excepted to run84* 
any instruction in userspace soon after the update of segments and 'rfi'85* instruction is used to return to userspace, but hash based cores86* (at least G3) seem to exhibit a random behaviour when the 'isync' is not87* there. 603 cores don't have this behaviour so don't do the 'isync' as it88* saves several CPU cycles.89*/90.macro uus_isync91#ifdef CONFIG_PPC_BOOK3S_60492BEGIN_MMU_FTR_SECTION93isync94END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)95#endif96.endm9798.macro update_user_segments_by_4 tmp1 tmp2 tmp3 tmp499uus_addi 1, \tmp2, \tmp1, 0x111100uus_addi 2, \tmp3, \tmp1, 0x222101uus_addi 3, \tmp4, \tmp1, 0x333102103uus_mtsr 0, \tmp1104uus_mtsr 1, \tmp2105uus_mtsr 2, \tmp3106uus_mtsr 3, \tmp4107108uus_addi 4, \tmp1, \tmp1, 0x444109uus_addi 5, \tmp2, \tmp2, 0x444110uus_addi 6, \tmp3, \tmp3, 0x444111uus_addi 7, \tmp4, \tmp4, 0x444112113uus_mtsr 4, \tmp1114uus_mtsr 5, \tmp2115uus_mtsr 6, \tmp3116uus_mtsr 7, \tmp4117118uus_addi 8, \tmp1, \tmp1, 0x444119uus_addi 9, \tmp2, \tmp2, 0x444120uus_addi 10, \tmp3, \tmp3, 0x444121uus_addi 11, \tmp4, \tmp4, 0x444122123uus_mtsr 8, \tmp1124uus_mtsr 9, \tmp2125uus_mtsr 10, \tmp3126uus_mtsr 11, \tmp4127128uus_addi 12, \tmp1, \tmp1, 0x444129uus_addi 13, \tmp2, \tmp2, 0x444130uus_addi 14, \tmp3, \tmp3, 0x444131uus_addi 15, \tmp4, \tmp4, 0x444132133uus_mtsr 12, \tmp1134uus_mtsr 13, \tmp2135uus_mtsr 14, \tmp3136uus_mtsr 15, \tmp4137138uus_isync139.endm140141#else142143/*144* This macro defines the mapping from contexts to VSIDs (virtual145* segment IDs). We use a skew on both the context and the high 4 bits146* of the 32-bit virtual address (the "effective segment ID") in order147* to spread out the entries in the MMU hash table. 
Note, if this148* function is changed then hash functions will have to be149* changed to correspond.150*/151#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)152153/*154* Hardware Page Table Entry155* Note that the xpn and x bitfields are used only by processors that156* support extended addressing; otherwise, those bits are reserved.157*/158struct hash_pte {159unsigned long v:1; /* Entry is valid */160unsigned long vsid:24; /* Virtual segment identifier */161unsigned long h:1; /* Hash algorithm indicator */162unsigned long api:6; /* Abbreviated page index */163unsigned long rpn:20; /* Real (physical) page number */164unsigned long xpn:3; /* Real page number bits 0-2, optional */165unsigned long r:1; /* Referenced */166unsigned long c:1; /* Changed */167unsigned long w:1; /* Write-thru cache mode */168unsigned long i:1; /* Cache inhibited */169unsigned long m:1; /* Memory coherence */170unsigned long g:1; /* Guarded */171unsigned long x:1; /* Real page number bit 3, optional */172unsigned long pp:2; /* Page protection */173};174175typedef struct {176unsigned long id;177unsigned long sr0;178void __user *vdso;179} mm_context_t;180181#ifdef CONFIG_PPC_KUEP182#define INIT_MM_CONTEXT(mm) .context.sr0 = SR_NX183#endif184185void update_bats(void);186static inline void cleanup_cpu_mmu_context(void) { }187188/* patch sites */189extern s32 patch__hash_page_A0, patch__hash_page_A1, patch__hash_page_A2;190extern s32 patch__hash_page_B, patch__hash_page_C;191extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2;192extern s32 patch__flush_hash_B;193194#include <asm/reg.h>195#include <asm/task_size_32.h>196197static __always_inline void update_user_segment(u32 n, u32 val)198{199if (n << 28 < TASK_SIZE)200mtsr(val + n * 0x111, n << 28);201}202203static __always_inline void update_user_segments(u32 val)204{205val &= 0xf0ffffff;206207update_user_segment(0, val);208update_user_segment(1, val);209update_user_segment(2, 
val);210update_user_segment(3, val);211update_user_segment(4, val);212update_user_segment(5, val);213update_user_segment(6, val);214update_user_segment(7, val);215update_user_segment(8, val);216update_user_segment(9, val);217update_user_segment(10, val);218update_user_segment(11, val);219update_user_segment(12, val);220update_user_segment(13, val);221update_user_segment(14, val);222update_user_segment(15, val);223}224225int __init find_free_bat(void);226unsigned int bat_block_size(unsigned long base, unsigned long top);227#endif /* !__ASSEMBLY__ */228229/* We happily ignore the smaller BATs on 601, we don't actually use230* those definitions on hash32 at the moment anyway231*/232#define mmu_virtual_psize MMU_PAGE_4K233#define mmu_linear_psize MMU_PAGE_256M234235#endif /* _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_ */236237238