GitHub Repository: torvalds/linux
Path: blob/master/arch/loongarch/kernel/acpi.c
// SPDX-License-Identifier: GPL-2.0
/*
 * acpi.c - Architecture-Specific Low-Level ACPI Boot Support
 *
 * Author: Jianmin Lv <lvjianmin@loongson.cn>
 *         Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/efi-bgrt.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/serial_core.h>
#include <asm/io.h>
#include <asm/numa.h>
#include <asm/loongson.h>

int acpi_disabled;
EXPORT_SYMBOL(acpi_disabled);
int acpi_noirq;
int acpi_pci_disabled;
EXPORT_SYMBOL(acpi_pci_disabled);
int acpi_strict = 1; /* We have no workarounds on LoongArch */
int num_processors;
int disabled_cpus;

u64 acpi_saved_sp;

#define PREFIX "ACPI: "

struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC];

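/* Map an ACPI table during early boot, before fixmap/ioremap are available */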
void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size)
{
	if (!phys || !size)
		return NULL;

	return early_memremap(phys, size);
}
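/* Tear down a mapping created by __acpi_map_table() */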
void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
{
	if (!map || !size)
		return;

	early_memunmap(map, size);
}

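/*
 * Map a region for the ACPI interpreter: cached for regions backed by
 * RAM (memblock memory), uncached for everything else (MMIO).
 */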
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
	if (!memblock_is_memory(phys))
		return ioremap(phys, size);
	else
		return ioremap_cache(phys, size);
}

#ifdef CONFIG_SMP
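/*
 * Allocate a logical CPU id for a physical core id found in the MADT.
 * Pass 1 registers enabled cores as present; pass 2 registers disabled
 * (hotpluggable) cores as possible only.
 */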
static int set_processor_mask(u32 id, u32 pass)
{
	int cpu = -1, cpuid = id;

	if (num_processors >= NR_CPUS) {
		pr_warn(PREFIX "nr_cpus limit of %i reached."
			" processor 0x%x ignored.\n", NR_CPUS, cpuid);
		return -ENODEV;
	}

	if (cpuid == loongson_sysconf.boot_cpu_id)
		cpu = 0;

	switch (pass) {
	case 1: /* Pass 1 handle enabled processors */
		if (cpu < 0)
			cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
		num_processors++;
		set_cpu_present(cpu, true);
		break;
	case 2: /* Pass 2 handle disabled processors */
		if (cpu < 0)
			cpu = find_first_zero_bit(cpumask_bits(cpu_possible_mask), NR_CPUS);
		disabled_cpus++;
		break;
	default:
		return cpu;
	}

	set_cpu_possible(cpu, true);
	__cpu_number_map[cpuid] = cpu;
	__cpu_logical_map[cpu] = cpuid;

	return cpu;
}
#endif

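/* MADT Core PIC callback, pass 1: record and register enabled cores */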
static int __init
acpi_parse_p1_processor(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_core_pic *processor = NULL;

	processor = (struct acpi_madt_core_pic *)header;
	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(&header->common);
#ifdef CONFIG_SMP
	acpi_core_pic[processor->core_id] = *processor;
	if (processor->flags & ACPI_MADT_ENABLED)
		set_processor_mask(processor->core_id, 1);
#endif

	return 0;
}

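/* MADT Core PIC callback, pass 2: register disabled (hot-pluggable) cores */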
static int __init
acpi_parse_p2_processor(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_core_pic *processor = NULL;

	processor = (struct acpi_madt_core_pic *)header;
	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

#ifdef CONFIG_SMP
	if (!(processor->flags & ACPI_MADT_ENABLED))
		set_processor_mask(processor->core_id, 2);
#endif

	return 0;
}

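/* MADT EIO PIC callback: mark the first core of each EIO node as an I/O master */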
static int __init
acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long end)
{
	static int core = 0;
	struct acpi_madt_eio_pic *eiointc = NULL;

	eiointc = (struct acpi_madt_eio_pic *)header;
	if (BAD_MADT_ENTRY(eiointc, end))
		return -EINVAL;

	core = eiointc->node * CORES_PER_EIO_NODE;
	set_bit(core, loongson_sysconf.cores_io_master);

	return 0;
}

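/*
 * Walk the MADT: reset the cpu maps, then register enabled cores,
 * disabled cores and EIO interrupt controllers in turn.
 */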
static void __init acpi_process_madt(void)
{
#ifdef CONFIG_SMP
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		__cpu_number_map[i] = -1;
		__cpu_logical_map[i] = -1;
	}
#endif
	acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
			acpi_parse_p1_processor, MAX_CORE_PIC);

	acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
			acpi_parse_p2_processor, MAX_CORE_PIC);

	acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
			acpi_parse_eio_master, MAX_IO_PICS);

	loongson_sysconf.nr_cpus = num_processors;
}

int pptt_enabled;

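/*
 * Fill in per-CPU core ids from the ACPI PPTT; for SMT CPUs the parent
 * (core) level of the topology is used instead of the thread level.
 */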
int __init parse_acpi_topology(void)
{
	int cpu, topology_id;

	for_each_possible_cpu(cpu) {
		topology_id = find_acpi_cpu_topology(cpu, 0);
		if (topology_id < 0) {
			pr_warn("Invalid BIOS PPTT\n");
			return -ENOENT;
		}

		if (acpi_pptt_cpu_is_thread(cpu) <= 0)
			cpu_data[cpu].core = topology_id;
		else {
			topology_id = find_acpi_cpu_topology(cpu, 1);
			if (topology_id < 0)
				return -ENOENT;

			cpu_data[cpu].core = topology_id;
		}
	}

	pptt_enabled = 1;

	return 0;
}

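/* Low-level ACPI suspend entry; only wired up when CONFIG_SUSPEND is enabled */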
#ifndef CONFIG_SUSPEND
int (*acpi_suspend_lowlevel)(void);
#else
int (*acpi_suspend_lowlevel)(void) = loongarch_acpi_suspend;
#endif

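/*
 * Early ACPI setup: initialize the table parser, process the MADT, and
 * fall back to FDT-based earlycon when ACPI is unavailable.
 */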
void __init acpi_boot_table_init(void)
{
	/*
	 * If acpi_disabled, bail out
	 */
	if (acpi_disabled)
		goto fdt_earlycon;

	/*
	 * Initialize the ACPI boot-time table parser.
	 */
	if (acpi_table_init()) {
		disable_acpi();
		goto fdt_earlycon;
	}

	loongson_sysconf.boot_cpu_id = read_csr_cpuid();

	/*
	 * Process the Multiple APIC Description Table (MADT), if present
	 */
	acpi_process_madt();

	/* Do not enable ACPI SPCR console by default */
	acpi_parse_spcr(earlycon_acpi_spcr_enable, false);

	if (IS_ENABLED(CONFIG_ACPI_BGRT))
		acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);

	return;

fdt_earlycon:
	if (earlycon_acpi_spcr_enable)
		early_init_dt_scan_chosen_stdout();
}

#ifdef CONFIG_ACPI_NUMA

/* Callback for Proximity Domain -> CPUID mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
	int pxm, node;

	if (srat_disabled())
		return;
	if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
		bad_srat();
		return;
	}
	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
		return;
	pxm = pa->proximity_domain_lo;
	if (acpi_srat_revision >= 2) {
		pxm |= (pa->proximity_domain_hi[0] << 8);
		pxm |= (pa->proximity_domain_hi[1] << 16);
		pxm |= (pa->proximity_domain_hi[2] << 24);
	}
	node = acpi_map_pxm_to_node(pxm);
	if (node < 0) {
		pr_err("SRAT: Too many proximity domains %x\n", pxm);
		bad_srat();
		return;
	}

	if (pa->apic_id >= CONFIG_NR_CPUS) {
		pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n",
				pxm, pa->apic_id, node);
		return;
	}

	early_numa_add_cpu(pa->apic_id, node);

	set_cpuid_to_node(pa->apic_id, node);
	node_set(node, numa_nodes_parsed);
	pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
}

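/* Callback for Proximity Domain -> x2APIC CPU mapping */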
void __init
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
{
	int pxm, node;

	if (srat_disabled())
		return;
	if (pa->header.length < sizeof(struct acpi_srat_x2apic_cpu_affinity)) {
		bad_srat();
		return;
	}
	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
		return;
	pxm = pa->proximity_domain;
	node = acpi_map_pxm_to_node(pxm);
	if (node < 0) {
		pr_err("SRAT: Too many proximity domains %x\n", pxm);
		bad_srat();
		return;
	}

	if (pa->apic_id >= CONFIG_NR_CPUS) {
		pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n",
				pxm, pa->apic_id, node);
		return;
	}

	early_numa_add_cpu(pa->apic_id, node);

	set_cpuid_to_node(pa->apic_id, node);
	node_set(node, numa_nodes_parsed);
	pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
}

#endif

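/* Reserve a physical memory region so it is not handed to the page allocator */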
void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
{
	memblock_reserve(addr, size);
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU

#include <acpi/processor.h>

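/* Bind a hot-added CPU to its NUMA node, if one can be determined */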
static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
	int nid;

	nid = acpi_get_node(handle);

	if (nid != NUMA_NO_NODE)
		nid = early_cpu_to_node(cpu);

	if (nid != NUMA_NO_NODE) {
		set_cpuid_to_node(physid, nid);
		node_set(nid, numa_nodes_parsed);
		set_cpu_numa_node(cpu, nid);
		cpumask_set_cpu(cpu, cpumask_of_node(nid));
	}
#endif
	return 0;
}

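/* Hot-add: map a physical CPU id to a logical one and mark it present */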
int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu)
{
	int cpu;

	cpu = cpu_number_map(physid);
	if (cpu < 0 || cpu >= nr_cpu_ids) {
		pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
		return -ERANGE;
	}

	num_processors++;
	set_cpu_present(cpu, true);
	acpi_map_cpu2node(handle, cpu, physid);

	*pcpu = cpu;

	return 0;
}
EXPORT_SYMBOL(acpi_map_cpu);

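/* Hot-remove: drop the CPU's NUMA binding and clear its present bit */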
int acpi_unmap_cpu(int cpu)
{
#ifdef CONFIG_ACPI_NUMA
	set_cpuid_to_node(cpu_logical_map(cpu), NUMA_NO_NODE);
#endif
	set_cpu_present(cpu, false);
	num_processors--;

	pr_info("cpu%d hot remove!\n", cpu);

	return 0;
}
EXPORT_SYMBOL(acpi_unmap_cpu);

#endif /* CONFIG_ACPI_HOTPLUG_CPU */