GitHub Repository: torvalds/linux
Path: blob/master/arch/parisc/mm/hugetlbpage.c
// SPDX-License-Identifier: GPL-2.0
/*
 * PARISC64 Huge TLB page support.
 *
 * This parisc implementation is heavily based on the SPARC and x86 code.
 *
 * Copyright (C) 2015 Helge Deller <[email protected]>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>


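/* Allocate page-table levels down to the PTE that will map the huge
 * page containing @addr; returns the first sub-PTE, or NULL if an
 * intermediate table could not be allocated.
 */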
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range. So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_offset(pgd, addr);
	pud = pud_alloc(mm, p4d, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_huge(mm, pmd, addr);
	}
	return pte;
}

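/* Look up an existing huge-page mapping for @addr by walking the page
 * tables; returns NULL if any intermediate level is empty.
 */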
pte_t *huge_pte_offset(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					pte = pte_offset_huge(pmd, addr);
			}
		}
	}
	return pte;
}

/* Purge data and instruction TLB entries. Must be called holding
 * the pa_tlb_lock. The TLB purge instructions are slow on SMP
 * machines since the purge must be broadcast to all CPUs.
 */
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
{
	int i;

	/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
	 * Linux standard huge pages (e.g. 2 MB) */
	BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);

	addr &= HPAGE_MASK;
	addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;

	for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
		purge_tlb_entries(mm, addr);
		addr += (1UL << REAL_HPAGE_SHIFT);
	}
}

/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	unsigned long addr_start;
	int i;

	addr &= HPAGE_MASK;
	addr_start = addr;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte(ptep, entry);
		ptep++;

		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}

	purge_tlb_entries_huge(mm, addr_start);
}

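/* Generic hugetlb hook: install @entry for the huge page at @addr by
 * filling in every sub-PTE and purging the stale TLB entries.
 */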
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry, unsigned long sz)
{
	__set_huge_pte_at(mm, addr, ptep, entry);
}


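/* Read the current huge PTE and clear the whole mapping by writing
 * zero sub-PTEs across the huge page; returns the old value.
 */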
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, unsigned long sz)
{
	pte_t entry;

	entry = *ptep;
	__set_huge_pte_at(mm, addr, ptep, __pte(0));

	return entry;
}


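/* Write-protect the huge page mapping by rewriting every sub-PTE with
 * the write bit cleared.
 */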
void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	pte_t old_pte;

	old_pte = *ptep;
	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
}

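/* Update access flags of a huge PTE; the mapping is rewritten only if
 * the new PTE differs, and the return value reports whether it did.
 */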
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	int changed;
	struct mm_struct *mm = vma->vm_mm;

	changed = !pte_same(*ptep, pte);
	if (changed) {
		__set_huge_pte_at(mm, addr, ptep, pte);
	}
	return changed;
}