Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
CTCaer
GitHub Repository: CTCaer/hekate
Path: blob/master/bdk/mem/smmu.c
3694 views
1
/*
2
* Copyright (c) 2018 naehrwert
3
* Copyright (c) 2018 balika011
4
* Copyright (c) 2018-2025 CTCaer
5
*
6
* This program is free software; you can redistribute it and/or modify it
7
* under the terms and conditions of the GNU General Public License,
8
* version 2, as published by the Free Software Foundation.
9
*
10
* This program is distributed in the hope it will be useful, but WITHOUT
11
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13
* more details.
14
*
15
* You should have received a copy of the GNU General Public License
16
* along with this program. If not, see <http://www.gnu.org/licenses/>.
17
*/
18
19
#include <string.h>
20
21
#include <soc/bpmp.h>
22
#include <soc/ccplex.h>
23
#include <soc/timer.h>
24
#include <soc/t210.h>
25
#include <mem/mc_t210.h>
26
#include <mem/smmu.h>
27
#include <memory_map.h>
28
29
/*! SMMU register defines */
// Replicates an 8-bit ASID into all four byte lanes, so the same value works
// for quad-domain enable registers (one ASID byte per sub-domain).
#define SMMU_ASID(asid) (((asid) << 24u) | ((asid) << 16u) | ((asid) << 8u) | (asid))
#define SMMU_ENABLE BIT(31)
// MC_SMMU_TLB_CONFIG fields.
#define SMMU_TLB_ACTIVE_LINES(l) ((l) << 0u)
#define SMMU_TLB_RR_ARBITRATION BIT(28)
#define SMMU_TLB_HIT_UNDER_MISS BIT(29)
#define SMMU_TLB_STATS_ENABLE BIT(31)
// MC_SMMU_PTC_CONFIG fields.
// NOTE(review): "SMUU" is a pre-existing typo for "SMMU". Kept as-is because
// renaming would have to touch every use site at once.
#define SMUU_PTC_INDEX_MAP(m) ((m) << 0u)
#define SMUU_PTC_LINE_MASK(m) ((m) << 8u)
#define SMUU_PTC_REQ_LIMIT(l) ((l) << 24u)
#define SMUU_PTC_CACHE_ENABLE BIT(29)
#define SMUU_PTC_STATS_ENABLE BIT(31)

/*! Page table defines */
// pde "next" field values: entry maps a 4MB region directly, or points to a
// second-level page table.
#define SMMU_4MB_REGION 0
#define SMMU_PAGE_TABLE 1
// 1024 pdes x 4MB and 1024 ptes x 4KB cover a full 32-bit IOVA space.
#define SMMU_PDIR_COUNT 1024
#define SMMU_PTBL_COUNT 1024
#define SMMU_PAGE_SHIFT 12u
#define SMMU_PTN_SHIFT SMMU_PAGE_SHIFT
#define SMMU_PDN_SHIFT 22u
#define SMMU_ADDR_TO_PFN(addr) ((addr) >> SMMU_PAGE_SHIFT)
#define SMMU_ADDR_TO_PTN(addr) ((addr) >> SMMU_PTN_SHIFT)
#define SMMU_ADDR_TO_PDN(addr) ((addr) >> SMMU_PDN_SHIFT)
#define SMMU_PTN_TO_ADDR(ptn) ((ptn) << SMMU_PTN_SHIFT)
#define SMMU_PDN_TO_ADDR(pdn) ((pdn) << SMMU_PDN_SHIFT)
// MC_SMMU_PTB_DATA value: attribute bits in [31:29], pdir page frame below.
#define SMMU_PTB(page, attr) (((attr) << 29u) | ((page) >> SMMU_PAGE_SHIFT))

// Byte offset 4 into smmu_enable_payload: the low byte of the MOV X0, #imm
// instruction. 0x20 encodes MOV X0, #1 (enable SMMU), 0x00 encodes
// MOV X0, #0 (disable SMMU).
#define SMMU_PAYLOAD_EN_SHIFT 4
#define SMMU_PAYLOAD_EN_SET 0x20
#define SMMU_PAYLOAD_EN_UNSET 0x00
// Enabling SMMU requires a TZ (EL3) secure write. MC(MC_SMMU_CONFIG) = 1;
// Hand-assembled AArch64 payload run on CCPLEX CPU0: loads the MC_SMMU_CONFIG
// address from the literal pool at offset 0x18, stores X0 (1, or 0 when
// patched by smmu_disable) to it, invalidates the icache and spins forever
// until CPU0 is powergated again.
static u8 smmu_enable_payload[] = {
	0xC1, 0x00, 0x00, 0x18, // 0x00: LDR W1, =0x70019010
	0x20, 0x00, 0x80, 0xD2, // 0x04: MOV X0, #0x1 (low byte patched via SMMU_PAYLOAD_EN_SHIFT)
	0x20, 0x00, 0x00, 0xB9, // 0x08: STR W0, [X1]
	0x1F, 0x71, 0x08, 0xD5, // 0x0C: IC IALLUIS
	0x9F, 0x3B, 0x03, 0xD5, // 0x10: DSB ISH
	0xFE, 0xFF, 0xFF, 0x17, // 0x14: B loop
	0x10, 0x90, 0x01, 0x70, // 0x18: MC_SMMU_CONFIG
};

// Bump-allocator cursor for page directory/table pages. Never freed; reset
// wholesale by smmu_reset_heap()/smmu_disable().
static void *smmu_heap = (void *)SMMU_HEAP_ADDR;
// Tracks global SMMU state so enable/disable are idempotent.
static bool smmu_enabled = false;
// Allocate `num` zeroed 4KB pages from the SMMU heap bump allocator.
// No bounds checking and no way to free individual allocations; the heap is
// only ever reset as a whole.
void *smmu_page_zalloc(u32 num)
{
	u8 *alloc = (u8 *)smmu_heap;
	u32 size = SZ_PAGE * num;

	memset(alloc, 0, size);

	// Advance the bump cursor past the returned pages.
	smmu_heap = alloc + size;

	return alloc;
}
static pde_t *_smmu_pdir_alloc()
86
{
87
pde_t *pdir = (pde_t *)smmu_page_zalloc(1);
88
89
// Initialize pdes with no permissions.
90
for (u32 pdn = 0; pdn < SMMU_PDIR_COUNT; pdn++)
91
pdir[pdn].huge.page = pdn;
92
93
return pdir;
94
}
95
96
// Dummy read-back of MC_SMMU_PTB_DATA so preceding MC SMMU register writes
// are committed before execution continues.
static void _smmu_flush_regs()
{
	(void)MC(MC_SMMU_PTB_DATA);
}
void smmu_flush_all()
102
{
103
// Flush the entire page table cache.
104
MC(MC_SMMU_PTC_FLUSH) = 0;
105
_smmu_flush_regs();
106
107
// Flush the entire table.
108
MC(MC_SMMU_TLB_FLUSH) = 0;
109
_smmu_flush_regs();
110
}
111
112
// One-time SMMU setup: clear the active ASID/page-table base, configure the
// TLB and page table cache, then invalidate both. Does not enable the SMMU;
// that needs the EL3 payload in smmu_enable().
void smmu_init()
{
	MC(MC_SMMU_PTB_ASID) = 0;
	MC(MC_SMMU_PTB_DATA) = 0;
	// 48 active TLB lines, round-robin arbitration, hit-under-miss allowed.
	MC(MC_SMMU_TLB_CONFIG) = SMMU_TLB_HIT_UNDER_MISS | SMMU_TLB_RR_ARBITRATION | SMMU_TLB_ACTIVE_LINES(48);
	// Enable PTC with request limit 8, line mask 0xF, index map 0x3F.
	MC(MC_SMMU_PTC_CONFIG) = SMUU_PTC_CACHE_ENABLE | SMUU_PTC_REQ_LIMIT(8) | SMUU_PTC_LINE_MASK(0xF) | SMUU_PTC_INDEX_MAP(0x3F);
	// Invalidate the entire PTC and TLB.
	MC(MC_SMMU_PTC_FLUSH) = 0;
	MC(MC_SMMU_TLB_FLUSH) = 0;
}
// Globally enable the SMMU. MC_SMMU_CONFIG requires a TZ (EL3) secure write,
// so a small payload is booted on CCPLEX CPU0 to do it, then CPU0 is
// powergated again. Idempotent via smmu_enabled.
void smmu_enable()
{
	if (smmu_enabled)
		return;

	// Launch payload on CCPLEX in order to set SMMU enable bit.
	ccplex_boot_cpu0((u32)smmu_enable_payload, false);
	msleep(100); // Payload spins forever; give it time to do the write.
	ccplex_powergate_cpu0();

	// Invalidate PTC/TLB so translation starts from a clean state.
	smmu_flush_all();

	smmu_enabled = true;
}
// Globally disable the SMMU using the same CCPLEX payload, temporarily
// patched to write 0 instead of 1. Also resets the page table heap, so all
// existing mappings are abandoned. Idempotent via smmu_enabled.
void smmu_disable()
{
	if (!smmu_enabled)
		return;

	// Set payload to disable SMMU. (Patches MOV X0, #1 into MOV X0, #0.)
	smmu_enable_payload[SMMU_PAYLOAD_EN_SHIFT] = SMMU_PAYLOAD_EN_UNSET;

	// Make sure the patched payload is visible to CCPLEX.
	smmu_flush_all();
	bpmp_mmu_maintenance(BPMP_MMU_MAINT_CLN_INV_WAY, false);

	// Launch payload on CCPLEX in order to clear SMMU enable bit.
	ccplex_boot_cpu0((u32)smmu_enable_payload, false);
	msleep(100); // Payload spins forever; give it time to do the write.
	ccplex_powergate_cpu0();

	smmu_flush_all();

	// Restore payload to SMMU enable.
	smmu_enable_payload[SMMU_PAYLOAD_EN_SHIFT] = SMMU_PAYLOAD_EN_SET;

	smmu_enabled = false;
	// Reset the bump allocator; all allocated page tables are discarded.
	smmu_heap = (void *)SMMU_HEAP_ADDR;
}
// Reset the page table bump allocator to its base address. Previously
// allocated page directories/tables are abandoned (never freed) and their
// memory will be reused by subsequent allocations.
void smmu_reset_heap()
{
	smmu_heap = (void *)SMMU_HEAP_ADDR;
}
// Create a translation domain: allocate a fresh page directory, bind it to
// `asid`, and enable translation on the device register at `dev_base`.
// Returns the page directory base for use with smmu_map()/smmu_map_huge().
void *smmu_domain_init(u32 dev_base, u32 asid)
{
	pde_t *pdir = _smmu_pdir_alloc();

	// Bind the new page directory to the requested ASID.
	MC(MC_SMMU_PTB_ASID) = asid;
	MC(MC_SMMU_PTB_DATA) = SMMU_PTB((u32)pdir, SMMU_ATTR_ALL);
	_smmu_flush_regs();

	// Use the same macro for both quad and single domains. Reserved bits are not set anyway.
	MC(dev_base) = SMMU_ENABLE | SMMU_ASID(asid);
	_smmu_flush_regs();

	return pdir;
}
// Tear down a translation domain: clear the ASID's page table base and
// disable translation on the device register at `dev_base`.
void smmu_domain_deinit(u32 dev_base, u32 asid)
{
	MC(MC_SMMU_PTB_ASID) = asid;
	MC(MC_SMMU_PTB_DATA) = 0;
	MC(dev_base) = 0;
	_smmu_flush_regs();
}
// Toggle translation bypass for a single device domain.
// Ordering is deliberate and differs per direction: when bypassing, caches
// are flushed while translation is still active, then translation is turned
// off; when re-enabling, translation is turned on first and the stale caches
// are flushed afterwards.
void smmu_domain_bypass(u32 dev_base, bool bypass)
{
	if (bypass)
	{
		smmu_flush_all();
		bpmp_mmu_maintenance(BPMP_MMU_MAINT_CLN_INV_WAY, false);
		MC(dev_base) &= ~SMMU_ENABLE;
	}
	else
	{
		bpmp_mmu_maintenance(BPMP_MMU_MAINT_CLN_INV_WAY, false);
		MC(dev_base) |= SMMU_ENABLE;
		smmu_flush_all();
	}
	_smmu_flush_regs();
}
// Return a pointer to the pte covering `iova` in directory `pdir`, creating
// and installing the second-level page table for its 4MB region on demand.
static pte_t *_smmu_get_pte(pde_t *pdir, u32 iova)
{
	u32 pdn = SMMU_ADDR_TO_PDN(iova);
	u32 idx = SMMU_ADDR_TO_PTN(iova) % SMMU_PTBL_COUNT;
	pte_t *ptbl;

	// Fast path: a page table already exists for this 4MB region.
	if (pdir[pdn].tbl.attr)
	{
		ptbl = (pte_t *)(SMMU_PTN_TO_ADDR(pdir[pdn].tbl.table));
		return &ptbl[idx];
	}

	// Slow path: allocate a fresh page table.
	ptbl = (pte_t *)smmu_page_zalloc(1);

	// Pre-fill it 1:1 with no permissions, starting at the region base.
	u32 addr = SMMU_PDN_TO_ADDR(pdn);
	for (u32 pn = 0; pn < SMMU_PTBL_COUNT; pn++)
	{
		ptbl[pn].page = SMMU_ADDR_TO_PFN(addr);
		addr += SZ_PAGE;
	}

	// Install the new table into the page directory.
	pdir[pdn].tbl.table = SMMU_ADDR_TO_PTN((u32)ptbl);
	pdir[pdn].tbl.next = SMMU_PAGE_TABLE;
	pdir[pdn].tbl.attr = SMMU_ATTR_ALL;

	smmu_flush_all();

	return &ptbl[idx];
}
// Map `pages` 4KB pages starting at iova -> iopa with attributes `attr`.
// VA/PA should be aligned to 4KB. Flushes SMMU caches when done.
void smmu_map(void *ptb, u32 iova, u64 iopa, u32 pages, u32 attr)
{
	while (pages--)
	{
		pte_t *entry = _smmu_get_pte((pde_t *)ptb, iova);

		entry->page = SMMU_ADDR_TO_PFN(iopa);
		entry->attr = attr;

		iova += SZ_PAGE;
		iopa += SZ_PAGE;
	}

	smmu_flush_all();
}
// Map `regions` 4MB regions starting at iova -> iopa directly in the page
// directory (no second-level tables). VA/PA should be aligned to 4MB.
// Flushes SMMU caches when done.
void smmu_map_huge(void *ptb, u32 iova, u64 iopa, u32 regions, u32 attr)
{
	pde_t *pdir = (pde_t *)ptb;

	for (u32 done = 0; done < regions; done++, iova += SZ_4M, iopa += SZ_4M)
	{
		pde_t *entry = &pdir[SMMU_ADDR_TO_PDN(iova)];

		entry->huge.page = SMMU_ADDR_TO_PDN(iopa);
		entry->huge.next = SMMU_4MB_REGION;
		entry->huge.attr = attr;
	}

	smmu_flush_all();
}