GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/hwmon/coretemp.c
/*
 * coretemp.c - Linux kernel module for hardware monitoring
 *
 * Copyright (C) 2007 Rudolf Marek <[email protected]>
 *
 * Inspired from many hwmon drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hwmon.h>
#include <linux/sysfs.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <asm/msr.h>
#include <asm/processor.h>

#define DRVNAME "coretemp"

#define BASE_SYSFS_ATTR_NO      2       /* Sysfs Base attr no for coretemp */
#define NUM_REAL_CORES          16      /* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH    17      /* String Length of attrs */
#define MAX_ATTRS               5       /* Maximum no of per-core attrs */
#define MAX_CORE_DATA           (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)

#ifdef CONFIG_SMP
#define TO_PHYS_ID(cpu)         cpu_data(cpu).phys_proc_id
#define TO_CORE_ID(cpu)         cpu_data(cpu).cpu_core_id
#define TO_ATTR_NO(cpu)         (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
#define for_each_sibling(i, cpu)        for_each_cpu(i, cpu_sibling_mask(cpu))
#else
#define TO_PHYS_ID(cpu)         (cpu)
#define TO_CORE_ID(cpu)         (cpu)
#define TO_ATTR_NO(cpu)         (cpu)
#define for_each_sibling(i, cpu)        for (i = 0; false; )
#endif

/*
 * Per-Core Temperature Data
 * @last_updated: The time (in jiffies) when the temperature value was
 *              last updated.
 * @cpu_core_id: The CPU Core from which temperature values should be read.
 *              This value is passed as "id" field to rdmsr/wrmsr functions.
 * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
 *              from where the temperature values should be read.
 * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data.
 *              Otherwise, temp_data holds coretemp data.
 * @valid: If this is 1, the current temperature is valid.
 */
struct temp_data {
        int temp;
        int ttarget;
        int tjmax;
        unsigned long last_updated;
        unsigned int cpu;
        u32 cpu_core_id;
        u32 status_reg;
        bool is_pkg_data;
        bool valid;
        struct sensor_device_attribute sd_attrs[MAX_ATTRS];
        char attr_name[MAX_ATTRS][CORETEMP_NAME_LENGTH];
        struct mutex update_lock;
};

/* Platform Data per Physical CPU */
struct platform_data {
        struct device *hwmon_dev;
        u16 phys_proc_id;
        struct temp_data *core_data[MAX_CORE_DATA];
        struct device_attribute name_attr;
};

struct pdev_entry {
        struct list_head list;
        struct platform_device *pdev;
        u16 phys_proc_id;
};

static LIST_HEAD(pdev_list);
static DEFINE_MUTEX(pdev_list_mutex);

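/* sysfs show callbacks for the name, label, alarm, Ttarget, TjMax and temperature attributes */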
static ssize_t show_name(struct device *dev,
                        struct device_attribute *devattr, char *buf)
{
        return sprintf(buf, "%s\n", DRVNAME);
}

static ssize_t show_label(struct device *dev,
                        struct device_attribute *devattr, char *buf)
{
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct platform_data *pdata = dev_get_drvdata(dev);
        struct temp_data *tdata = pdata->core_data[attr->index];

        if (tdata->is_pkg_data)
                return sprintf(buf, "Physical id %u\n", pdata->phys_proc_id);

        return sprintf(buf, "Core %u\n", tdata->cpu_core_id);
}

static ssize_t show_crit_alarm(struct device *dev,
                        struct device_attribute *devattr, char *buf)
{
        u32 eax, edx;
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct platform_data *pdata = dev_get_drvdata(dev);
        struct temp_data *tdata = pdata->core_data[attr->index];

        rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);

        return sprintf(buf, "%d\n", (eax >> 5) & 1);
}

static ssize_t show_tjmax(struct device *dev,
                        struct device_attribute *devattr, char *buf)
{
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct platform_data *pdata = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tjmax);
}

static ssize_t show_ttarget(struct device *dev,
                        struct device_attribute *devattr, char *buf)
{
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct platform_data *pdata = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget);
}

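/*
 * Read the current temperature from the status MSR, caching the value for
 * one second (HZ jiffies). The hardware readout is a negative offset from
 * TjMax, so the reported value is tjmax minus the digital readout.
 */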
static ssize_t show_temp(struct device *dev,
                        struct device_attribute *devattr, char *buf)
{
        u32 eax, edx;
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct platform_data *pdata = dev_get_drvdata(dev);
        struct temp_data *tdata = pdata->core_data[attr->index];

        mutex_lock(&tdata->update_lock);

        /* Check whether the time interval has elapsed */
        if (!tdata->valid || time_after(jiffies, tdata->last_updated + HZ)) {
                rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
                tdata->valid = 0;
                /* Check whether the data is valid */
                if (eax & 0x80000000) {
                        tdata->temp = tdata->tjmax -
                                        ((eax >> 16) & 0x7f) * 1000;
                        tdata->valid = 1;
                }
                tdata->last_updated = jiffies;
        }

        mutex_unlock(&tdata->update_lock);
        return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
}

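/*
 * Estimate TjMax for CPUs that do not report it in IA32_TEMPERATURE_TARGET,
 * based on the CPU model, the mobile/EE bits in MSRs 0x17 and 0xEE and, for
 * Atom, the host bridge PCI device ID.
 */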
static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
{
        /* 100C is the default TjMax for both mobile and non-mobile CPUs */

        int tjmax = 100000;
        int tjmax_ee = 85000;
        int usemsr_ee = 1;
        int err;
        u32 eax, edx;
        struct pci_dev *host_bridge;

        /* Early chips have no MSR for TjMax */

        if (c->x86_model == 0xf && c->x86_mask < 4)
                usemsr_ee = 0;

        /* Atom CPUs */

        if (c->x86_model == 0x1c) {
                usemsr_ee = 0;

                host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));

                if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL
                    && (host_bridge->device == 0xa000   /* NM10 based nettop */
                    || host_bridge->device == 0xa010))  /* NM10 based netbook */
                        tjmax = 100000;
                else
                        tjmax = 90000;

                pci_dev_put(host_bridge);
        }

        if (c->x86_model > 0xe && usemsr_ee) {
                u8 platform_id;

                /*
                 * Now we can detect the mobile CPU using Intel provided table
                 * http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
                 * For Core2 cores, check MSR 0x17, bit 28 1 = Mobile CPU
                 */
                err = rdmsr_safe_on_cpu(id, 0x17, &eax, &edx);
                if (err) {
                        dev_warn(dev,
                                 "Unable to access MSR 0x17, assuming desktop"
                                 " CPU\n");
                        usemsr_ee = 0;
                } else if (c->x86_model < 0x17 && !(eax & 0x10000000)) {
                        /*
                         * Trust bit 28 up to Penryn, I could not find any
                         * documentation on that; if you happen to know
                         * someone at Intel please ask
                         */
                        usemsr_ee = 0;
                } else {
                        /* Platform ID bits 52:50 (EDX starts at bit 32) */
                        platform_id = (edx >> 18) & 0x7;

                        /*
                         * Mobile Penryn CPU seems to be platform ID 7 or 5
                         * (guesswork)
                         */
                        if (c->x86_model == 0x17 &&
                            (platform_id == 5 || platform_id == 7)) {
                                /*
                                 * If MSR EE bit is set, set it to 90 degrees C,
                                 * otherwise 105 degrees C
                                 */
                                tjmax_ee = 90000;
                                tjmax = 105000;
                        }
                }
        }

        if (usemsr_ee) {
                err = rdmsr_safe_on_cpu(id, 0xee, &eax, &edx);
                if (err) {
                        dev_warn(dev,
                                 "Unable to access MSR 0xEE, for Tjmax, left"
                                 " at default\n");
                } else if (eax & 0x40000000) {
                        tjmax = tjmax_ee;
                }
        } else if (tjmax == 100000) {
                /*
                 * If we don't use the MSR EE bit, it means we are on a
                 * desktop CPU (with the exception of Atom)
                 */
                dev_warn(dev, "Using relative temperature scale!\n");
        }

        return tjmax;
}

static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
{
        /* 100C is the default TjMax for both mobile and non-mobile CPUs */
        int err;
        u32 eax, edx;
        u32 val;

        /*
         * A new feature of current Intel(R) processors, the
         * IA32_TEMPERATURE_TARGET contains the TjMax value
         */
        err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
        if (err) {
                dev_warn(dev, "Unable to read TjMax from CPU.\n");
        } else {
                val = (eax >> 16) & 0xff;
                /*
                 * If the TjMax is not plausible, an assumption
                 * will be used
                 */
                if (val) {
                        dev_info(dev, "TjMax is %d C.\n", val);
                        return val * 1000;
                }
        }

        /*
         * An assumption is made for early CPUs and unreadable MSR.
         * NOTE: the calculated value may not be correct.
         */
        return adjust_tjmax(c, id, dev);
}

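/*
 * Runs on the target CPU via smp_call_function_single() and returns the
 * microcode revision through *edx; used by chk_ucode_version() below.
 */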
static void __devinit get_ucode_rev_on_cpu(void *edx)
{
        u32 eax;

        wrmsr(MSR_IA32_UCODE_REV, 0, 0);
        sync_core();
        rdmsr(MSR_IA32_UCODE_REV, eax, *(u32 *)edx);
}

static int get_pkg_tjmax(unsigned int cpu, struct device *dev)
{
        int err;
        u32 eax, edx, val;

        err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
        if (!err) {
                val = (eax >> 16) & 0xff;
                if (val)
                        return val * 1000;
        }
        dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu);
        return 100000; /* Default TjMax: 100 degree celsius */
}

static int create_name_attr(struct platform_data *pdata, struct device *dev)
{
        sysfs_attr_init(&pdata->name_attr.attr);
        pdata->name_attr.attr.name = "name";
        pdata->name_attr.attr.mode = S_IRUGO;
        pdata->name_attr.show = show_name;
        return device_create_file(dev, &pdata->name_attr);
}

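/*
 * Create the five tempX_{label,crit_alarm,max,input,crit} sysfs files for
 * one core (or for the package sensor), where X is attr_no.
 */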
static int create_core_attrs(struct temp_data *tdata, struct device *dev,
                                int attr_no)
{
        int err, i;
        static ssize_t (*rd_ptr[MAX_ATTRS]) (struct device *dev,
                        struct device_attribute *devattr, char *buf) = {
                        show_label, show_crit_alarm, show_ttarget,
                        show_temp, show_tjmax };
        static const char *names[MAX_ATTRS] = {
                                        "temp%d_label", "temp%d_crit_alarm",
                                        "temp%d_max", "temp%d_input",
                                        "temp%d_crit" };

        for (i = 0; i < MAX_ATTRS; i++) {
                snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i],
                        attr_no);
                sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
                tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
                tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
                tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
                tdata->sd_attrs[i].dev_attr.store = NULL;
                tdata->sd_attrs[i].index = attr_no;
                err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr);
                if (err)
                        goto exit_free;
        }
        return 0;

exit_free:
        while (--i >= 0)
                device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);
        return err;
}

static void update_ttarget(__u8 cpu_model, struct temp_data *tdata,
                                struct device *dev)
{
        int err;
        u32 eax, edx;

        /*
         * Initialize the ttarget value. Eventually this will be
         * initialized with the value from the MSR_IA32_THERM_INTERRUPT
         * register. If IA32_TEMPERATURE_TARGET is supported, this
         * value will be overwritten below.
         * To Do: Patch to initialize ttarget from MSR_IA32_THERM_INTERRUPT
         */
        tdata->ttarget = tdata->tjmax - 20000;

        /*
         * Read the still undocumented IA32_TEMPERATURE_TARGET. It exists
         * on older CPUs but not in this register; Atoms don't have it
         * either.
         */
        if (cpu_model > 0xe && cpu_model != 0x1c) {
                err = rdmsr_safe_on_cpu(tdata->cpu,
                                MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
                if (err) {
                        dev_warn(dev,
                                "Unable to read IA32_TEMPERATURE_TARGET MSR\n");
                } else {
                        tdata->ttarget = tdata->tjmax -
                                        ((eax >> 8) & 0xff) * 1000;
                }
        }
}

static int __devinit chk_ucode_version(struct platform_device *pdev)
{
        struct cpuinfo_x86 *c = &cpu_data(pdev->id);
        int err;
        u32 edx;

        /*
         * Check if we have a problem with errata AE18 of Core processors:
         * readings might stop updating when the processor visits too deep
         * a sleep state; fixed for stepping D0 (6EC).
         */
        if (c->x86_model == 0xe && c->x86_mask < 0xc) {
                /* check for microcode update */
                err = smp_call_function_single(pdev->id, get_ucode_rev_on_cpu,
                                               &edx, 1);
                if (err) {
                        dev_err(&pdev->dev,
                                "Cannot determine microcode revision of "
                                "CPU#%u (%d)!\n", pdev->id, err);
                        return -ENODEV;
                } else if (edx < 0x39) {
                        dev_err(&pdev->dev,
                                "Errata AE18 not fixed, update BIOS or "
                                "microcode of the CPU!\n");
                        return -ENODEV;
                }
        }
        return 0;
}

static struct platform_device *coretemp_get_pdev(unsigned int cpu)
{
        u16 phys_proc_id = TO_PHYS_ID(cpu);
        struct pdev_entry *p;

        mutex_lock(&pdev_list_mutex);

        list_for_each_entry(p, &pdev_list, list)
                if (p->phys_proc_id == phys_proc_id) {
                        mutex_unlock(&pdev_list_mutex);
                        return p->pdev;
                }

        mutex_unlock(&pdev_list_mutex);
        return NULL;
}

static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
{
        struct temp_data *tdata;

        tdata = kzalloc(sizeof(struct temp_data), GFP_KERNEL);
        if (!tdata)
                return NULL;

        tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
                                        MSR_IA32_THERM_STATUS;
        tdata->is_pkg_data = pkg_flag;
        tdata->cpu = cpu;
        tdata->cpu_core_id = TO_CORE_ID(cpu);
        mutex_init(&tdata->update_lock);
        return tdata;
}

static int create_core_data(struct platform_data *pdata,
                                struct platform_device *pdev,
                                unsigned int cpu, int pkg_flag)
{
        struct temp_data *tdata;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        u32 eax, edx;
        int err, attr_no;

        /*
         * Find attr number for sysfs:
         * We map the attr number to core id of the CPU
         * The attr number is always core id + 2
         * The Pkgtemp will always show up as temp1_*, if available
         */
        attr_no = pkg_flag ? 1 : TO_ATTR_NO(cpu);

        if (attr_no > MAX_CORE_DATA - 1)
                return -ERANGE;

        /*
         * Provide a single set of attributes for all HT siblings of a core
         * to avoid duplicate sensors (the processor ID and core ID of all
         * HT siblings of a core are the same).
         * Skip if a HT sibling of this core is already registered.
         * This is not an error.
         */
        if (pdata->core_data[attr_no] != NULL)
                return 0;

        tdata = init_temp_data(cpu, pkg_flag);
        if (!tdata)
                return -ENOMEM;

        /* Test if we can access the status register */
        err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx);
        if (err)
                goto exit_free;

        /* We can access status register. Get Critical Temperature */
        if (pkg_flag)
                tdata->tjmax = get_pkg_tjmax(pdev->id, &pdev->dev);
        else
                tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);

        update_ttarget(c->x86_model, tdata, &pdev->dev);
        pdata->core_data[attr_no] = tdata;

        /* Create sysfs interfaces */
        err = create_core_attrs(tdata, &pdev->dev, attr_no);
        if (err)
                goto exit_free;

        return 0;
exit_free:
        kfree(tdata);
        return err;
}

static void coretemp_add_core(unsigned int cpu, int pkg_flag)
{
        struct platform_data *pdata;
        struct platform_device *pdev = coretemp_get_pdev(cpu);
        int err;

        if (!pdev)
                return;

        pdata = platform_get_drvdata(pdev);

        err = create_core_data(pdata, pdev, cpu, pkg_flag);
        if (err)
                dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);
}

static void coretemp_remove_core(struct platform_data *pdata,
                                struct device *dev, int indx)
{
        int i;
        struct temp_data *tdata = pdata->core_data[indx];

        /* Remove the sysfs attributes */
        for (i = 0; i < MAX_ATTRS; i++)
                device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);

        kfree(pdata->core_data[indx]);
        pdata->core_data[indx] = NULL;
}

static int __devinit coretemp_probe(struct platform_device *pdev)
{
        struct platform_data *pdata;
        int err;

        /* Check the microcode version of the CPU */
        err = chk_ucode_version(pdev);
        if (err)
                return err;

        /* Initialize the per-package data structures */
        pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL);
        if (!pdata)
                return -ENOMEM;

        err = create_name_attr(pdata, &pdev->dev);
        if (err)
                goto exit_free;

        pdata->phys_proc_id = TO_PHYS_ID(pdev->id);
        platform_set_drvdata(pdev, pdata);

        pdata->hwmon_dev = hwmon_device_register(&pdev->dev);
        if (IS_ERR(pdata->hwmon_dev)) {
                err = PTR_ERR(pdata->hwmon_dev);
                dev_err(&pdev->dev, "Class registration failed (%d)\n", err);
                goto exit_name;
        }
        return 0;

exit_name:
        device_remove_file(&pdev->dev, &pdata->name_attr);
        platform_set_drvdata(pdev, NULL);
exit_free:
        kfree(pdata);
        return err;
}

static int __devexit coretemp_remove(struct platform_device *pdev)
{
        struct platform_data *pdata = platform_get_drvdata(pdev);
        int i;

        for (i = MAX_CORE_DATA - 1; i >= 0; --i)
                if (pdata->core_data[i])
                        coretemp_remove_core(pdata, &pdev->dev, i);

        device_remove_file(&pdev->dev, &pdata->name_attr);
        hwmon_device_unregister(pdata->hwmon_dev);
        platform_set_drvdata(pdev, NULL);
        kfree(pdata);
        return 0;
}

static struct platform_driver coretemp_driver = {
        .driver = {
                .owner = THIS_MODULE,
                .name = DRVNAME,
        },
        .probe = coretemp_probe,
        .remove = __devexit_p(coretemp_remove),
};

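/*
 * Register one platform device per physical package; the device id is the
 * number of the first CPU of that package to come online.
 */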
static int __cpuinit coretemp_device_add(unsigned int cpu)
{
        int err;
        struct platform_device *pdev;
        struct pdev_entry *pdev_entry;

        mutex_lock(&pdev_list_mutex);

        pdev = platform_device_alloc(DRVNAME, cpu);
        if (!pdev) {
                err = -ENOMEM;
                pr_err("Device allocation failed\n");
                goto exit;
        }

        pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL);
        if (!pdev_entry) {
                err = -ENOMEM;
                goto exit_device_put;
        }

        err = platform_device_add(pdev);
        if (err) {
                pr_err("Device addition failed (%d)\n", err);
                goto exit_device_free;
        }

        pdev_entry->pdev = pdev;
        pdev_entry->phys_proc_id = TO_PHYS_ID(cpu);

        list_add_tail(&pdev_entry->list, &pdev_list);
        mutex_unlock(&pdev_list_mutex);

        return 0;

exit_device_free:
        kfree(pdev_entry);
exit_device_put:
        platform_device_put(pdev);
exit:
        mutex_unlock(&pdev_list_mutex);
        return err;
}

static void coretemp_device_remove(unsigned int cpu)
{
        struct pdev_entry *p, *n;
        u16 phys_proc_id = TO_PHYS_ID(cpu);

        mutex_lock(&pdev_list_mutex);
        list_for_each_entry_safe(p, n, &pdev_list, list) {
                if (p->phys_proc_id != phys_proc_id)
                        continue;
                platform_device_unregister(p->pdev);
                list_del(&p->list);
                kfree(p);
        }
        mutex_unlock(&pdev_list_mutex);
}

static bool is_any_core_online(struct platform_data *pdata)
{
        int i;

        /* Find online cores, except pkgtemp data */
        for (i = MAX_CORE_DATA - 1; i >= 0; --i) {
                if (pdata->core_data[i] &&
                        !pdata->core_data[i]->is_pkg_data) {
                        return true;
                }
        }
        return false;
}

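/*
 * CPU hotplug handling: get_core_online() adds the package device and/or
 * the per-core attributes when a CPU comes up; put_core_offline() removes
 * them, handing the sensor over to a surviving HT sibling where possible.
 */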
static void __cpuinit get_core_online(unsigned int cpu)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct platform_device *pdev = coretemp_get_pdev(cpu);
        int err;

        /*
         * CPUID.06H.EAX[0] indicates whether the CPU has thermal
         * sensors. We check this bit only; all the early CPUs
         * without thermal sensors will be filtered out.
         */
        if (!cpu_has(c, X86_FEATURE_DTS))
                return;

        if (!pdev) {
                /*
                 * Alright, we have DTS support.
                 * We are bringing the _first_ core in this pkg
                 * online. So, initialize per-pkg data structures and
                 * then bring this core online.
                 */
                err = coretemp_device_add(cpu);
                if (err)
                        return;
                /*
                 * Check whether pkgtemp support is available.
                 * If so, add interfaces for pkgtemp.
                 */
                if (cpu_has(c, X86_FEATURE_PTS))
                        coretemp_add_core(cpu, 1);
        }
        /*
         * Physical CPU device already exists.
         * So, just add interfaces for this core.
         */
        coretemp_add_core(cpu, 0);
}

static void __cpuinit put_core_offline(unsigned int cpu)
{
        int i, indx;
        struct platform_data *pdata;
        struct platform_device *pdev = coretemp_get_pdev(cpu);

        /* If the physical CPU device does not exist, just return */
        if (!pdev)
                return;

        pdata = platform_get_drvdata(pdev);

        indx = TO_ATTR_NO(cpu);

        if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu)
                coretemp_remove_core(pdata, &pdev->dev, indx);

        /*
         * If a HT sibling of a core is taken offline, but another HT sibling
         * of the same core is still online, register the alternate sibling.
         * This ensures that exactly one set of attributes is provided as long
         * as at least one HT sibling of a core is online.
         */
        for_each_sibling(i, cpu) {
                if (i != cpu) {
                        get_core_online(i);
                        /*
                         * Display temperature sensor data for one HT sibling
                         * per core only, so abort the loop after one such
                         * sibling has been found.
                         */
                        break;
                }
        }
        /*
         * If all cores in this pkg are offline, remove the device.
         * coretemp_device_remove calls platform_device_unregister,
         * which in turn calls coretemp_remove. This removes the
         * pkgtemp entry and does other clean ups.
         */
        if (!is_any_core_online(pdata))
                coretemp_device_remove(cpu);
}

static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
                                 unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long) hcpu;

        switch (action) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
                get_core_online(cpu);
                break;
        case CPU_DOWN_PREPARE:
                put_core_offline(cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block coretemp_cpu_notifier __refdata = {
        .notifier_call = coretemp_cpu_callback,
};

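/*
 * Module init: register the platform driver, probe all CPUs that are
 * already online, then register the hotplug notifier for later changes.
 */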
static int __init coretemp_init(void)
{
        int i, err = -ENODEV;

        /* quick check if we run Intel */
        if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL)
                goto exit;

        err = platform_driver_register(&coretemp_driver);
        if (err)
                goto exit;

        for_each_online_cpu(i)
                get_core_online(i);

#ifndef CONFIG_HOTPLUG_CPU
        if (list_empty(&pdev_list)) {
                err = -ENODEV;
                goto exit_driver_unreg;
        }
#endif

        register_hotcpu_notifier(&coretemp_cpu_notifier);
        return 0;

#ifndef CONFIG_HOTPLUG_CPU
exit_driver_unreg:
        platform_driver_unregister(&coretemp_driver);
#endif
exit:
        return err;
}

static void __exit coretemp_exit(void)
{
        struct pdev_entry *p, *n;

        unregister_hotcpu_notifier(&coretemp_cpu_notifier);
        mutex_lock(&pdev_list_mutex);
        list_for_each_entry_safe(p, n, &pdev_list, list) {
                platform_device_unregister(p->pdev);
                list_del(&p->list);
                kfree(p);
        }
        mutex_unlock(&pdev_list_mutex);
        platform_driver_unregister(&coretemp_driver);
}

MODULE_AUTHOR("Rudolf Marek <[email protected]>");
MODULE_DESCRIPTION("Intel Core temperature monitor");
MODULE_LICENSE("GPL");

module_init(coretemp_init)
module_exit(coretemp_exit)