GitHub Repository: torvalds/linux
Path: blob/master/arch/arm64/kernel/hibernate.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hibernate support specific for ARM64
 *
 * Derived from work on ARM hibernation support by:
 *
 * Ubuntu project, hibernation support for mach-dove
 * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
 * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
 * Copyright (C) 2006 Rafael J. Wysocki <[email protected]>
 */
#define pr_fmt(x) "hibernate: " x
#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/utsname.h>

#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/irqflags.h>
#include <asm/kexec.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/sysreg.h>
#include <asm/trans_pgd.h>
#include <asm/virt.h>

/*
 * Hibernate core relies on this value being 0 on resume, and marks it
 * __nosavedata assuming it will keep the resume kernel's '0' value. This
 * doesn't happen with KASLR.
 *
 * defined as "__visible int in_suspend __nosavedata" in
 * kernel/power/hibernate.c
 */
extern int in_suspend;

/* Do we need to reset el2? */
#define el2_reset_needed() (is_hyp_nvhe())

/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
extern char __hyp_stub_vectors[];

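/*
 * Note: is_hyp_nvhe() is true only when the kernel booted at EL2 and
 * runs its core at EL1 (non-VHE). With VHE the kernel itself owns EL2,
 * and when booted at EL1 there is no EL2 state of ours to restore, so
 * neither case needs the hyp-stub handling below.
 */
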
/*
 * The logical cpu number we should resume on, initialised to a non-cpu
 * number.
 */
static int sleep_cpu = -EINVAL;

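/*
 * Logical CPU numbers are not stable across different kernels, so the
 * image header records the physical MPIDR of this CPU instead;
 * get_logical_index() maps it back to a logical number on restore.
 */
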
/*
 * Values that may not change over hibernate/resume. We put the build number
 * and date in here so that we guarantee not to resume with a different
 * kernel.
 */
struct arch_hibernate_hdr_invariants {
        char uts_version[__NEW_UTS_LEN + 1];
};

/* These values need to be known across a hibernate/restore. */
static struct arch_hibernate_hdr {
        struct arch_hibernate_hdr_invariants invariants;

        /* These are needed to find the relocated kernel if built with kaslr */
        phys_addr_t ttbr1_el1;
        void (*reenter_kernel)(void);

        /*
         * We need to know where the __hyp_stub_vectors are after restore to
         * re-configure el2.
         */
        phys_addr_t __hyp_stub_vectors;

        u64 sleep_cpu_mpidr;
} resume_hdr;

static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
{
        memset(i, 0, sizeof(*i));
        memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
}

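/*
 * uts_version holds the kernel's UTS_VERSION string (build number plus
 * build timestamp), so even a rebuild from identical sources produces
 * a mismatch and the stale image is refused on restore.
 */
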
int pfn_is_nosave(unsigned long pfn)
{
        unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
        unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);

        return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) ||
                crash_is_nosave(pfn);
}

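/*
 * __nosave_begin/__nosave_end delimit the linker-script .data..nosave
 * section; those pages, along with pages reserved for the crash-dump
 * kernel (crash_is_nosave()), are left out of the image by the
 * hibernate core.
 */
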
void notrace save_processor_state(void)
{
}

void notrace restore_processor_state(void)
{
}

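/*
 * Both hooks are deliberately empty on arm64: the CPU context is
 * captured and re-installed by __cpu_suspend_enter()/__cpu_suspend_exit()
 * in swsusp_arch_suspend() below, leaving nothing extra to do here.
 */
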
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
        struct arch_hibernate_hdr *hdr = addr;

        if (max_size < sizeof(*hdr))
                return -EOVERFLOW;

        arch_hdr_invariants(&hdr->invariants);
        hdr->ttbr1_el1 = __pa_symbol(swapper_pg_dir);
        hdr->reenter_kernel = _cpu_resume;

        /* We can't use __hyp_get_vectors() because kvm may still be loaded */
        if (el2_reset_needed())
                hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);
        else
                hdr->__hyp_stub_vectors = 0;

        /* Save the mpidr of the cpu we called cpu_suspend() on... */
        if (sleep_cpu < 0) {
                pr_err("Failing to hibernate on an unknown CPU.\n");
                return -ENODEV;
        }
        hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu);
        pr_info("Hibernating on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
                hdr->sleep_cpu_mpidr);

        return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_save);

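/*
 * The header saved above travels inside the hibernate image itself. The
 * boot kernel reads it back via arch_hibernation_header_restore() before
 * copying any pages, which is how a KASLR'd resume kernel finds the
 * image kernel's page tables and re-entry point.
 */
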
int arch_hibernation_header_restore(void *addr)
{
        int ret;
        struct arch_hibernate_hdr_invariants invariants;
        struct arch_hibernate_hdr *hdr = addr;

        arch_hdr_invariants(&invariants);
        if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
                pr_crit("Hibernate image not generated by this kernel!\n");
                return -EINVAL;
        }

        sleep_cpu = get_logical_index(hdr->sleep_cpu_mpidr);
        pr_info("Hibernated on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
                hdr->sleep_cpu_mpidr);
        if (sleep_cpu < 0) {
                pr_crit("Hibernated on a CPU not known to this kernel!\n");
                sleep_cpu = -EINVAL;
                return -EINVAL;
        }

        ret = bringup_hibernate_cpu(sleep_cpu);
        if (ret) {
                sleep_cpu = -EINVAL;
                return ret;
        }

        resume_hdr = *hdr;

        return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_restore);

static void *hibernate_page_alloc(void *arg)
{
        return (void *)get_safe_page((__force gfp_t)(unsigned long)arg);
}

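/*
 * get_safe_page() returns pages the hibernate core guarantees are not
 * part of the saved image, so structures built from them (page tables,
 * the safe exec page below) survive while the image is copied over the
 * running kernel.
 */
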
/*
 * Copies length bytes, starting at src_start, into a new page, performs
 * cache maintenance, then maps the page at a low address as executable.
 *
 * This is used by hibernate to copy the code it needs to execute when
 * overwriting the kernel text. This function generates a new set of page
 * tables, which it loads into ttbr0.
 *
 * Length is provided as we probably only want 4K of data, even on a 64K
 * page system.
 */
static int create_safe_exec_page(void *src_start, size_t length,
                                 phys_addr_t *phys_dst_addr)
{
        struct trans_pgd_info trans_info = {
                .trans_alloc_page = hibernate_page_alloc,
                .trans_alloc_arg = (__force void *)GFP_ATOMIC,
        };

        void *page = (void *)get_safe_page(GFP_ATOMIC);
        phys_addr_t trans_ttbr0;
        unsigned long t0sz;
        int rc;

        if (!page)
                return -ENOMEM;

        memcpy(page, src_start, length);
        caches_clean_inval_pou((unsigned long)page, (unsigned long)page + length);
        rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page);
        if (rc)
                return rc;

        cpu_install_ttbr0(trans_ttbr0, t0sz);
        *phys_dst_addr = virt_to_phys(page);

        return 0;
}

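/*
 * trans_pgd_idmap_page() maps the copied page at VA == PA in a fresh
 * set of ttbr0 tables; executing from that identity map is what lets
 * the relocation code keep running while it replaces every ttbr1
 * mapping underneath itself.
 */
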
#ifdef CONFIG_ARM64_MTE

static DEFINE_XARRAY(mte_pages);

static int save_tags(struct page *page, unsigned long pfn)
{
        void *tag_storage, *ret;

        tag_storage = mte_allocate_tag_storage();
        if (!tag_storage)
                return -ENOMEM;

        mte_save_page_tags(page_address(page), tag_storage);

        ret = xa_store(&mte_pages, pfn, tag_storage, GFP_KERNEL);
        if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
                mte_free_tag_storage(tag_storage);
                return xa_err(ret);
        } else if (WARN(ret, "swsusp: %s: Duplicate entry", __func__)) {
                mte_free_tag_storage(ret);
        }

        return 0;
}

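/*
 * mte_pages maps pfn -> out-of-line tag buffer. The tags must be copied
 * aside because MTE tag storage is not visible through the normal data
 * mappings that the image copy preserves.
 */
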
static void swsusp_mte_free_storage(void)
{
        XA_STATE(xa_state, &mte_pages, 0);
        void *tags;

        xa_lock(&mte_pages);
        xas_for_each(&xa_state, tags, ULONG_MAX) {
                mte_free_tag_storage(tags);
        }
        xa_unlock(&mte_pages);

        xa_destroy(&mte_pages);
}

static int swsusp_mte_save_tags(void)
{
        struct zone *zone;
        unsigned long pfn, max_zone_pfn;
        int ret = 0;
        int n = 0;

        if (!system_supports_mte())
                return 0;

        for_each_populated_zone(zone) {
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
                        struct page *page = pfn_to_online_page(pfn);
                        struct folio *folio;

                        if (!page)
                                continue;
                        folio = page_folio(page);

                        if (folio_test_hugetlb(folio) &&
                            !folio_test_hugetlb_mte_tagged(folio))
                                continue;

                        if (!page_mte_tagged(page))
                                continue;

                        ret = save_tags(page, pfn);
                        if (ret) {
                                swsusp_mte_free_storage();
                                goto out;
                        }

                        n++;
                }
        }
        pr_info("Saved %d MTE pages\n", n);

out:
        return ret;
}

static void swsusp_mte_restore_tags(void)
{
        XA_STATE(xa_state, &mte_pages, 0);
        int n = 0;
        void *tags;

        xa_lock(&mte_pages);
        xas_for_each(&xa_state, tags, ULONG_MAX) {
                unsigned long pfn = xa_state.xa_index;
                struct page *page = pfn_to_online_page(pfn);

                mte_restore_page_tags(page_address(page), tags);

                mte_free_tag_storage(tags);
                n++;
        }
        xa_unlock(&mte_pages);

        pr_info("Restored %d MTE pages\n", n);

        xa_destroy(&mte_pages);
}

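/*
 * Restore runs on the "just resumed" path of swsusp_arch_suspend(): by
 * then the data pages, and this xarray itself, have been copied back
 * into place, so the saved tags can be reattached to their pfns before
 * tasks are thawed.
 */
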
#else /* CONFIG_ARM64_MTE */

static int swsusp_mte_save_tags(void)
{
        return 0;
}

static void swsusp_mte_restore_tags(void)
{
}

#endif /* CONFIG_ARM64_MTE */

int swsusp_arch_suspend(void)
{
        int ret = 0;
        unsigned long flags;
        struct sleep_stack_data state;

        if (cpus_are_stuck_in_kernel()) {
                pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
                return -EBUSY;
        }

        flags = local_daif_save();

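        /*
         * Like setjmp(), __cpu_suspend_enter() returns twice: non-zero
         * here once the CPU context has been captured and the image
         * should be saved, and zero when execution comes back through
         * cpu_resume() after the image has been restored.
         */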
        if (__cpu_suspend_enter(&state)) {
                /* make the crash dump kernel image visible/saveable */
                crash_prepare_suspend();

                ret = swsusp_mte_save_tags();
                if (ret)
                        return ret;

                sleep_cpu = smp_processor_id();
                ret = swsusp_save();
        } else {
                /* Clean kernel core startup/idle code to PoC */
                dcache_clean_inval_poc((unsigned long)__mmuoff_data_start,
                                       (unsigned long)__mmuoff_data_end);
                dcache_clean_inval_poc((unsigned long)__idmap_text_start,
                                       (unsigned long)__idmap_text_end);

                /* Clean kvm setup code to PoC? */
                if (el2_reset_needed()) {
                        dcache_clean_inval_poc(
                                (unsigned long)__hyp_idmap_text_start,
                                (unsigned long)__hyp_idmap_text_end);
                        dcache_clean_inval_poc((unsigned long)__hyp_text_start,
                                               (unsigned long)__hyp_text_end);
                }

                swsusp_mte_restore_tags();

                /* make the crash dump kernel image protected again */
                crash_post_resume();

                /*
                 * Tell the hibernation core that we've just restored
                 * the memory
                 */
                in_suspend = 0;

                sleep_cpu = -EINVAL;
                __cpu_suspend_exit();

                /*
                 * Just in case the boot kernel did turn the SSBD
                 * mitigation off behind our back, let's set the state
                 * to what we expect it to be.
                 */
                spectre_v4_enable_mitigation(NULL);
        }

        local_daif_restore(flags);

        return ret;
}

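/*
 * Note that both branches above run with all DAIF exceptions masked, so
 * the captured context cannot be perturbed by interrupts; the save path
 * and the "just resumed" path normally fall through to the same
 * local_daif_restore().
 */
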
/*
 * Set up, then resume from, the hibernate image using
 * swsusp_arch_suspend_exit().
 *
 * Memory allocated by get_safe_page() will be dealt with by the hibernate code,
 * we don't need to free it here.
 */
int swsusp_arch_resume(void)
{
        int rc;
        void *zero_page;
        size_t exit_size;
        pgd_t *tmp_pg_dir;
        phys_addr_t el2_vectors;
        void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
                                          void *, phys_addr_t, phys_addr_t);
        struct trans_pgd_info trans_info = {
                .trans_alloc_page = hibernate_page_alloc,
                .trans_alloc_arg = (__force void *)GFP_ATOMIC,
        };

        /*
         * Restoring the memory image will overwrite the ttbr1 page tables.
         * Create a second copy of just the linear map, and use this when
         * restoring.
         */
        rc = trans_pgd_create_copy(&trans_info, &tmp_pg_dir, PAGE_OFFSET,
                                   PAGE_END);
        if (rc)
                return rc;

        /*
         * We need a zero page that is zero before & after resume in order
         * to break before make on the ttbr1 page tables.
         */
        zero_page = (void *)get_safe_page(GFP_ATOMIC);
        if (!zero_page) {
                pr_err("Failed to allocate zero page.\n");
                return -ENOMEM;
        }

        if (el2_reset_needed()) {
                rc = trans_pgd_copy_el2_vectors(&trans_info, &el2_vectors);
                if (rc) {
                        pr_err("Failed to setup el2 vectors\n");
                        return rc;
                }
        }

        exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
        /*
         * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
         * a new set of ttbr0 page tables and load them.
         */
        rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
                                   (phys_addr_t *)&hibernate_exit);
        if (rc) {
                pr_err("Failed to create safe executable page for hibernate_exit code.\n");
                return rc;
        }

        /*
         * KASLR will cause the el2 vectors to be in a different location in
         * the resumed kernel. Load hibernate's temporary copy into el2.
         *
         * We can skip this step if we booted at EL1, or are running with VHE.
         */
        if (el2_reset_needed())
                __hyp_set_vectors(el2_vectors);

        hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
                       resume_hdr.reenter_kernel, restore_pblist,
                       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));

        return 0;
}

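/*
 * hibernate_exit() never returns here: it copies the restore_pblist
 * pages back into place, installs the image kernel's ttbr1_el1 and
 * branches to its _cpu_resume; the 'return 0' above only satisfies the
 * compiler. hibernate_resume_nonboot_cpu_disable() below has already
 * parked every CPU except sleep_cpu, since the saved context must be
 * re-entered on the CPU it was saved on.
 */
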
int hibernate_resume_nonboot_cpu_disable(void)
{
        if (sleep_cpu < 0) {
                pr_err("Failing to resume from hibernate on an unknown CPU.\n");
                return -ENODEV;
        }

        return freeze_secondary_cpus(sleep_cpu);
}