GitHub Repository: torvalds/linux
Path: blob/master/arch/mips/kernel/machine_kexec.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * machine_kexec.c for kexec
 * Created by <[email protected]> on Thu Oct 12 15:15:06 2006
 */
#include <linux/compiler.h>
#include <linux/kexec.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/libfdt.h>
#include <linux/reboot.h>

#include <asm/cacheflush.h>
#include <asm/page.h>
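/*
 * The relocation stub and the variables it reads are provided by the MIPS
 * assembly helper arch/mips/kernel/relocate_kernel.S; kexec_start_address
 * and kexec_indirection_page are filled in below by machine_kexec().
 */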
extern const unsigned char relocate_new_kernel[];
extern const size_t relocate_new_kernel_size;

extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;
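/* Kernel virtual address of the control page that receives the relocation stub. */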
static unsigned long reboot_code_buffer;

#ifdef CONFIG_SMP
static void (*relocated_kexec_smp_wait)(void *);

atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
void (*_crash_smp_send_stop)(void) = NULL;
#endif

void (*_machine_kexec_shutdown)(void) = NULL;
void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
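/*
 * Dump the kimage layout (type, entry point, head and each segment)
 * via pr_debug().
 */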
static void kexec_image_info(const struct kimage *kimage)
{
	unsigned long i;

	pr_debug("kexec kimage info:\n");
	pr_debug("  type:        %d\n", kimage->type);
	pr_debug("  start:       %lx\n", kimage->start);
	pr_debug("  head:        %lx\n", kimage->head);
	pr_debug("  nr_segments: %lu\n", kimage->nr_segments);

	for (i = 0; i < kimage->nr_segments; i++) {
		pr_debug("    segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
			i,
			kimage->segment[i].mem,
			kimage->segment[i].mem + kimage->segment[i].memsz,
			(unsigned long)kimage->segment[i].memsz,
			(unsigned long)kimage->segment[i].memsz / PAGE_SIZE);
	}
}
#ifdef CONFIG_UHI_BOOT

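/*
 * Scan the segments for a flat device tree; if one is found, set up the
 * UHI handoff: kexec_args[0] (register a0) = -2 tells the new kernel
 * that kexec_args[1] (register a1) holds the address of the DTB.
 */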
static int uhi_machine_kexec_prepare(struct kimage *kimage)
{
	int i;

	/*
	 * If no DTB file is passed to the new kernel, the kexec tool
	 * creates a flat device tree that holds the modified command
	 * line for the new kernel.
	 */
	for (i = 0; i < kimage->nr_segments; i++) {
		struct fdt_header fdt;

		if (kimage->segment[i].memsz <= sizeof(fdt))
			continue;

		if (copy_from_user(&fdt, kimage->segment[i].buf, sizeof(fdt)))
			continue;

		if (fdt_check_header(&fdt))
			continue;

		kexec_args[0] = -2;
		kexec_args[1] = (unsigned long)
			phys_to_virt((unsigned long)kimage->segment[i].mem);
		break;
	}

	return 0;
}

int (*_machine_kexec_prepare)(struct kimage *) = uhi_machine_kexec_prepare;

#else

int (*_machine_kexec_prepare)(struct kimage *) = NULL;

#endif /* CONFIG_UHI_BOOT */

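/*
 * Called by the kexec core while an image is being loaded: fail if the
 * SMP ops give us no way to handle the non-boot CPUs, log the image
 * layout, then let the optional platform hook inspect the image.
 */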
int
machine_kexec_prepare(struct kimage *kimage)
{
#ifdef CONFIG_SMP
	if (!kexec_nonboot_cpu_func())
		return -EINVAL;
#endif

	kexec_image_info(kimage);

	if (_machine_kexec_prepare)
		return _machine_kexec_prepare(kimage);

	return 0;
}
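/* Nothing to release on MIPS when an image is torn down. */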
void
machine_kexec_cleanup(struct kimage *kimage)
{
}

#ifdef CONFIG_SMP
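/*
 * Runs on each secondary CPU via smp_call_function() from
 * machine_shutdown(): mark the CPU offline, mask interrupts, spin until
 * the boot CPU sets kexec_ready_to_reboot, then enter kexec_reboot().
 */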
static void kexec_shutdown_secondary(void *param)
{
	int cpu = smp_processor_id();

	if (!cpu_online(cpu))
		return;

	/* We won't be sent IPIs any more. */
	set_cpu_online(cpu, false);

	local_irq_disable();
	while (!atomic_read(&kexec_ready_to_reboot))
		cpu_relax();

	kexec_reboot();

	/* NOTREACHED */
}
#endif

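/*
 * Called by the kexec core before jumping to the new kernel: run the
 * optional platform shutdown hook, then take every secondary CPU offline
 * and wait until this CPU is the only one left online.
 */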
void
machine_shutdown(void)
{
	if (_machine_kexec_shutdown)
		_machine_kexec_shutdown();

#ifdef CONFIG_SMP
	smp_call_function(kexec_shutdown_secondary, NULL, 0);

	while (num_online_cpus() > 1) {
		cpu_relax();
		mdelay(1);
	}
#endif
}
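/*
 * Crash (kdump) variant: defer to the platform hook when one is
 * registered, otherwise use the generic MIPS crash shutdown.
 */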
void
machine_crash_shutdown(struct pt_regs *regs)
{
	if (_machine_crash_shutdown)
		_machine_crash_shutdown(regs);
	else
		default_machine_crash_shutdown(regs);
}

#ifdef CONFIG_SMP
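/*
 * Last hop for a secondary CPU: make the copied relocation code visible
 * to the instruction stream, then park in the relocated kexec_smp_wait
 * loop.
 */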
void kexec_nonboot_cpu_jump(void)
{
	local_flush_icache_range((unsigned long)relocated_kexec_smp_wait,
				 reboot_code_buffer + relocate_new_kernel_size);

	relocated_kexec_smp_wait(NULL);
}
#endif

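/*
 * Taken by every CPU on the way into the new kernel: secondaries divert
 * into kexec_nonboot_cpu(), while the boot CPU flushes the I-cache over
 * the control page and jumps to the copied relocate_new_kernel stub.
 */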
void kexec_reboot(void)
{
	void (*do_kexec)(void) __noreturn;

	/*
	 * We know we were online, and there will be no incoming IPIs at
	 * this point. Mark online again before rebooting so that the crash
	 * analysis tool will see us correctly.
	 */
	set_cpu_online(smp_processor_id(), true);

	/* Ensure remote CPUs observe that we're online before rebooting. */
	smp_mb__after_atomic();

#ifdef CONFIG_SMP
	if (smp_processor_id() > 0) {
		/*
		 * Instead of cpu_relax() or wait, this is needed for kexec
		 * SMP reboot. Kdump usually doesn't require an SMP-capable
		 * new kernel, but kexec may.
		 */
		kexec_nonboot_cpu();

		/* NOTREACHED */
	}
#endif

	/*
	 * Make sure we get correct instructions written by the
	 * machine_kexec() CPU.
	 */
	local_flush_icache_range(reboot_code_buffer,
				 reboot_code_buffer + relocate_new_kernel_size);

	do_kexec = (void *)reboot_code_buffer;
	do_kexec();
}
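/*
 * Final architecture step of kexec: copy the relocation stub into the
 * control page, rewrite the indirection list from physical to virtual
 * (KSEG0) addresses, flush caches, release the secondaries spinning on
 * kexec_ready_to_reboot and reboot through kexec_reboot().
 */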
void
machine_kexec(struct kimage *image)
{
	unsigned long entry;
	unsigned long *ptr;

	reboot_code_buffer =
		(unsigned long)page_address(image->control_code_page);

	kexec_start_address =
		(unsigned long) phys_to_virt(image->start);

	if (image->type == KEXEC_TYPE_DEFAULT) {
		kexec_indirection_page =
			(unsigned long) phys_to_virt(image->head & PAGE_MASK);
	} else {
		kexec_indirection_page = (unsigned long)&image->head;
	}

	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
	       relocate_new_kernel_size);

	/*
	 * The generic kexec code builds a page list with physical
	 * addresses. They are directly accessible through KSEG0 (or
	 * CKSEG0 or XKPHYS on a 64-bit system), hence the
	 * phys_to_virt() call.
	 */
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
		phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
		    *ptr & IND_DESTINATION)
			*ptr = (unsigned long) phys_to_virt(*ptr);
	}

	/* Mark offline BEFORE disabling local irq. */
	set_cpu_online(smp_processor_id(), false);

	/*
	 * We do not want to be bothered.
	 */
	local_irq_disable();

	printk("Will call new kernel at %08lx\n", image->start);
	printk("Bye ...\n");
	/* Make reboot code buffer available to the boot CPU. */
	__flush_cache_all();
#ifdef CONFIG_SMP
	/* All secondary CPUs may now jump to the kexec_wait cycle. */
	relocated_kexec_smp_wait = reboot_code_buffer +
		(void *)(kexec_smp_wait - relocate_new_kernel);
	smp_wmb();
	atomic_set(&kexec_ready_to_reboot, 1);
#endif
	kexec_reboot();
}