GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/acpi/processor_core.c

/*
 * Copyright (C) 2005 Intel Corporation
 * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
 *
 *	Alex Chiang <[email protected]>
 *	- Unified x86/ia64 implementations
 *	Venkatesh Pallipadi <[email protected]>
 *	- Added _PDC for platforms with Intel CPUs
 */
#include <linux/dmi.h>
#include <linux/slab.h>

#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>

#include "internal.h"

#define PREFIX			"ACPI: "
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_core");

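/* DMI callback: disable MWAIT-based C-states on platforms matched below */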
static int __init set_no_mwait(const struct dmi_system_id *id)
{
        printk(KERN_NOTICE PREFIX "%s detected - "
                "disabling mwait for CPU C-states\n", id->ident);
        boot_option_idle_override = IDLE_NOMWAIT;
        return 0;
}

static struct dmi_system_id __initdata processor_idle_dmi_table[] = {
        {
        set_no_mwait, "Extensa 5220", {
        DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
        DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
        DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
        DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
        {},
};

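/*
 * MADT subtable matchers: each checks a single Local APIC, x2APIC or
 * SAPIC entry against the requested ACPI processor ID and, on a match,
 * stores the physical APIC ID in *apic_id and returns 1; otherwise
 * returns 0.
 */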
static int map_lapic_id(struct acpi_subtable_header *entry,
                 u32 acpi_id, int *apic_id)
{
        struct acpi_madt_local_apic *lapic =
                (struct acpi_madt_local_apic *)entry;

        if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
                return 0;

        if (lapic->processor_id != acpi_id)
                return 0;

        *apic_id = lapic->id;
        return 1;
}

static int map_x2apic_id(struct acpi_subtable_header *entry,
                         int device_declaration, u32 acpi_id, int *apic_id)
{
        struct acpi_madt_local_x2apic *apic =
                (struct acpi_madt_local_x2apic *)entry;

        if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
                return 0;

        if (device_declaration && (apic->uid == acpi_id)) {
                *apic_id = apic->local_apic_id;
                return 1;
        }

        return 0;
}

static int map_lsapic_id(struct acpi_subtable_header *entry,
                int device_declaration, u32 acpi_id, int *apic_id)
{
        struct acpi_madt_local_sapic *lsapic =
                (struct acpi_madt_local_sapic *)entry;

        if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
                return 0;

        if (device_declaration) {
                if ((entry->length < 16) || (lsapic->uid != acpi_id))
                        return 0;
        } else if (lsapic->processor_id != acpi_id)
                return 0;

        *apic_id = (lsapic->id << 8) | lsapic->eid;
        return 1;
}

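/*
 * Scan the MADT (fetched once and cached in a static pointer) for an
 * enabled entry matching @acpi_id.  Returns the physical APIC ID, or
 * -1 if no match is found.
 */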
static int map_madt_entry(int type, u32 acpi_id)
{
        unsigned long madt_end, entry;
        static struct acpi_table_madt *madt;
        static int read_madt;
        int apic_id = -1;

        if (!read_madt) {
                if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
                                        (struct acpi_table_header **)&madt)))
                        madt = NULL;
                read_madt++;
        }

        if (!madt)
                return apic_id;

        entry = (unsigned long)madt;
        madt_end = entry + madt->header.length;

        /* Parse all entries looking for a match. */

        entry += sizeof(struct acpi_table_madt);
        while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
                struct acpi_subtable_header *header =
                        (struct acpi_subtable_header *)entry;
                if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
                        if (map_lapic_id(header, acpi_id, &apic_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
                        if (map_x2apic_id(header, type, acpi_id, &apic_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
                        if (map_lsapic_id(header, type, acpi_id, &apic_id))
                                break;
                }
                entry += header->length;
        }
        return apic_id;
}

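/*
 * Evaluate the processor's _MAT method, which hands back a single MADT
 * subtable, and try to map it to a physical APIC ID.  Returns -1 on
 * failure.
 */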
static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
{
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
        struct acpi_subtable_header *header;
        int apic_id = -1;

        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
                goto exit;

        if (!buffer.length || !buffer.pointer)
                goto exit;

        obj = buffer.pointer;
        if (obj->type != ACPI_TYPE_BUFFER ||
            obj->buffer.length < sizeof(struct acpi_subtable_header)) {
                goto exit;
        }

        header = (struct acpi_subtable_header *)obj->buffer.pointer;
        if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
                map_lapic_id(header, acpi_id, &apic_id);
        } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
                map_lsapic_id(header, type, acpi_id, &apic_id);
        }

exit:
        if (buffer.pointer)
                kfree(buffer.pointer);
        return apic_id;
}

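/*
 * Map an ACPI processor object to a logical CPU number.  @type is
 * non-zero for a Device (ACPI0007) declaration and zero for a Processor
 * declaration; @acpi_id is the _UID or processor ID respectively.
 * Returns -1 if the object does not correspond to a possible CPU.
 */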
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
#ifdef CONFIG_SMP
        int i;
#endif
        int apic_id = -1;

        apic_id = map_mat_entry(handle, type, acpi_id);
        if (apic_id == -1)
                apic_id = map_madt_entry(type, acpi_id);
        if (apic_id == -1)
                return apic_id;

#ifdef CONFIG_SMP
        for_each_possible_cpu(i) {
                if (cpu_physical_id(i) == apic_id)
                        return i;
        }
#else
        /* In UP kernel, only processor 0 is valid */
        if (apic_id == 0)
                return apic_id;
#endif
        return -1;
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);

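/*
 * Check whether the handle refers to a processor that is physically
 * present, i.e. whose ACPI ID maps to a possible CPU via acpi_get_cpuid().
 */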
static bool __init processor_physically_present(acpi_handle handle)
{
        int cpuid, type;
        u32 acpi_id;
        acpi_status status;
        acpi_object_type acpi_type;
        unsigned long long tmp;
        union acpi_object object = { 0 };
        struct acpi_buffer buffer = { sizeof(union acpi_object), &object };

        status = acpi_get_type(handle, &acpi_type);
        if (ACPI_FAILURE(status))
                return false;

        switch (acpi_type) {
        case ACPI_TYPE_PROCESSOR:
                status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
                if (ACPI_FAILURE(status))
                        return false;
                acpi_id = object.processor.proc_id;
                break;
        case ACPI_TYPE_DEVICE:
                status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
                if (ACPI_FAILURE(status))
                        return false;
                acpi_id = tmp;
                break;
        default:
                return false;
        }

        type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
        cpuid = acpi_get_cpuid(handle, type, acpi_id);

        if (cpuid == -1)
                return false;

        return true;
}

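/* Fill the three-dword _PDC buffer: revision, capability count, capability bits */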
static void __cpuinit acpi_set_pdc_bits(u32 *buf)
{
        buf[0] = ACPI_PDC_REVISION_ID;
        buf[1] = 1;

        /* Enable coordination with firmware's _TSD info */
        buf[2] = ACPI_PDC_SMP_T_SWCOORD;

        /* Twiddle arch-specific bits needed for _PDC */
        arch_acpi_set_pdc_bits(buf);
}

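/*
 * Allocate the acpi_object_list/buffer pair passed as the _PDC argument
 * and pre-populate the capability dwords.  The caller is responsible
 * for freeing all three allocations.
 */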
static struct acpi_object_list *__cpuinit acpi_processor_alloc_pdc(void)
{
        struct acpi_object_list *obj_list;
        union acpi_object *obj;
        u32 *buf;

        /* allocate and initialize pdc. It will be used later. */
        obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
        if (!obj_list) {
                printk(KERN_ERR "Memory allocation error\n");
                return NULL;
        }

        obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
        if (!obj) {
                printk(KERN_ERR "Memory allocation error\n");
                kfree(obj_list);
                return NULL;
        }

        buf = kmalloc(12, GFP_KERNEL);
        if (!buf) {
                printk(KERN_ERR "Memory allocation error\n");
                kfree(obj);
                kfree(obj_list);
                return NULL;
        }

        acpi_set_pdc_bits(buf);

        obj->type = ACPI_TYPE_BUFFER;
        obj->buffer.length = 12;
        obj->buffer.pointer = (u8 *) buf;
        obj_list->count = 1;
        obj_list->pointer = obj;

        return obj_list;
}

/*
 * _PDC is required for a BIOS-OS handshake for most of the newer
 * ACPI processor features.
 */
static int __cpuinit
acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
{
        acpi_status status = AE_OK;

        if (boot_option_idle_override == IDLE_NOMWAIT) {
                /*
                 * If MWAIT is disabled for CPU C-states, clear the C2C3_FFH
                 * and C1_FFH access-mode bits in the _PDC capability buffer
                 * before it is handed to the firmware.
                 */
                union acpi_object *obj;
                u32 *buffer = NULL;

                obj = pdc_in->pointer;
                buffer = (u32 *)(obj->buffer.pointer);
                buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);
        }
        status = acpi_evaluate_object(handle, "_PDC", pdc_in, NULL);

        if (ACPI_FAILURE(status))
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "Could not evaluate _PDC, using legacy perf. control.\n"));

        return status;
}

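/* Build the _PDC argument, evaluate it for @handle if the architecture supports _PDC, then free the buffers */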
void __cpuinit acpi_processor_set_pdc(acpi_handle handle)
{
        struct acpi_object_list *obj_list;

        if (arch_has_acpi_pdc() == false)
                return;

        obj_list = acpi_processor_alloc_pdc();
        if (!obj_list)
                return;

        acpi_processor_eval_pdc(handle, obj_list);

        kfree(obj_list->pointer->buffer.pointer);
        kfree(obj_list->pointer);
        kfree(obj_list);
}

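/* Namespace-walk callback: evaluate _PDC only for processors that are present */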
static acpi_status __init
early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
{
        if (processor_physically_present(handle) == false)
                return AE_OK;

        acpi_processor_set_pdc(handle);
        return AE_OK;
}

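/*
 * Evaluate _PDC early for every processor object in the namespace,
 * whether declared via a Processor statement or an ACPI0007 Device.
 */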
void __init acpi_early_processor_set_pdc(void)
{
        /*
         * Check whether the system matches an entry in the DMI quirk
         * table.  If so, OSPM should not use MWAIT for CPU C-states.
         */
        dmi_check_system(processor_idle_dmi_table);

        acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
                            ACPI_UINT32_MAX,
                            early_init_pdc, NULL, NULL, NULL);
        acpi_get_devices("ACPI0007", early_init_pdc, NULL, NULL);
}