GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/ia64/kernel/palinfo.c
/*
 * palinfo.c
 *
 * Prints processor specific information reported by PAL.
 * This code is based on specification of PAL as of the
 * Intel IA-64 Architecture Software Developer's Manual v1.0.
 *
 *
 * Copyright (C) 2000-2001, 2003 Hewlett-Packard Co
 *	Stephane Eranian <[email protected]>
 * Copyright (C) 2004 Intel Corporation
 *	Ashok Raj <[email protected]>
 *
 * 05/26/2000	S.Eranian	initial release
 * 08/21/2000	S.Eranian	updated to July 2000 PAL specs
 * 02/05/2001	S.Eranian	fixed module support
 * 10/23/2001	S.Eranian	updated pal_perf_mon_info bug fixes
 * 03/24/2004	Ashok Raj	updated to work with CPU Hotplug
 * 10/26/2006	Russ Anderson	updated processor features to rev 2.2 spec
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/pal.h>
#include <asm/sal.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/smp.h>

MODULE_AUTHOR("Stephane Eranian <[email protected]>");
MODULE_DESCRIPTION("/proc interface to IA-64 PAL");
MODULE_LICENSE("GPL");

#define PALINFO_VERSION "0.5"

typedef int (*palinfo_func_t)(char*);

typedef struct {
	const char		*name;		/* name of the proc entry */
	palinfo_func_t		proc_read;	/* function to call for reading */
	struct proc_dir_entry	*entry;		/* registered entry (removal) */
} palinfo_entry_t;

/*
 * A bunch of string arrays used for pretty printing
 */

static char *cache_types[] = {
	"",			/* not used */
	"Instruction",
	"Data",
	"Data/Instruction"	/* unified */
};
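
/*
 * Note: cache_info() and vm_info() below index this array as
 * cache_types[j + unified], where j is the PAL cache_type argument
 * (2 = data, 1 = instruction); e.g. j=2 with the unified bit set
 * selects "Data/Instruction".
 */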

static const char *cache_mattrib[]={
	"WriteThrough",
	"WriteBack",
	"",		/* reserved */
	""		/* reserved */
};

static const char *cache_st_hints[]={
	"Temporal, level 1",
	"Reserved",
	"Reserved",
	"Non-temporal, all levels",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved"
};

static const char *cache_ld_hints[]={
	"Temporal, level 1",
	"Non-temporal, level 1",
	"Reserved",
	"Non-temporal, all levels",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved"
};

static const char *rse_hints[]={
	"enforced lazy",
	"eager stores",
	"eager loads",
	"eager loads and stores"
};

#define RSE_HINTS_COUNT ARRAY_SIZE(rse_hints)

static const char *mem_attrib[]={
	"WB",		/* 000 */
	"SW",		/* 001 */
	"010",		/* 010 */
	"011",		/* 011 */
	"UC",		/* 100 */
	"UCE",		/* 101 */
	"WC",		/* 110 */
	"NaTPage"	/* 111 */
};

/*
 * Takes a 64-bit vector and produces a string such that
 * if bit n is set then 2^n is printed in clear text. The value is
 * also scaled to the appropriate unit.
 *
 * Input:
 *	- a pointer to a buffer to hold the string
 *	- a 64-bit vector
 * Output:
 *	- a pointer to the end of the buffer
 *
 */
static char *
bitvector_process(char *p, u64 vector)
{
	int i,j;
	const char *units[]={ "", "K", "M", "G", "T" };

	for (i=0, j=0; i < 64; i++ , j=i/10) {
		if (vector & 0x1) {
			p += sprintf(p, "%d%s ", 1 << (i-j*10), units[j]);
		}
		vector >>= 1;
	}
	return p;
}
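
/*
 * Example: a page-size vector with bits 12, 16 and 21 set prints
 * "4K 64K 2M " -- bit n contributes 2^n, scaled by units[n/10].
 */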

/*
 * Takes a 64-bit vector and produces a string such that
 * if bit n is set then register n is present. The function
 * takes into account consecutive registers and prints out ranges.
 *
 * Input:
 *	- a pointer to a buffer to hold the string
 *	- a 64-bit vector
 * Output:
 *	- a pointer to the end of the buffer
 *
 */
static char *
bitregister_process(char *p, u64 *reg_info, int max)
{
	int i, begin, skip = 0;
	u64 value = reg_info[0];

	value >>= i = begin = ffs(value) - 1;

	for(; i < max; i++ ) {

		if (i != 0 && (i%64) == 0) value = *++reg_info;

		if ((value & 0x1) == 0 && skip == 0) {
			if (begin <= i - 2)
				p += sprintf(p, "%d-%d ", begin, i-1);
			else
				p += sprintf(p, "%d ", i-1);
			skip = 1;
			begin = -1;
		} else if ((value & 0x1) && skip == 1) {
			skip = 0;
			begin = i;
		}
		value >>=1;
	}
	if (begin > -1) {
		if (begin < 127)
			p += sprintf(p, "%d-127", begin);
		else
			p += sprintf(p, "127");
	}

	return p;
}
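
/*
 * Example: with bits 0-5 and 16 set in reg_info[0] (and the rest of
 * the vector clear), this prints "0-5 16 " -- runs of consecutive
 * registers are collapsed into ranges.
 */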

static int
power_info(char *page)
{
	s64 status;
	char *p = page;
	u64 halt_info_buffer[8];
	pal_power_mgmt_info_u_t *halt_info =(pal_power_mgmt_info_u_t *)halt_info_buffer;
	int i;

	status = ia64_pal_halt_info(halt_info);
	if (status != 0) return 0;

	for (i=0; i < 8 ; i++ ) {
		if (halt_info[i].pal_power_mgmt_info_s.im == 1) {
			p += sprintf(p, "Power level %d:\n"
				     "\tentry_latency : %d cycles\n"
				     "\texit_latency : %d cycles\n"
				     "\tpower consumption : %d mW\n"
				     "\tCache+TLB coherency : %s\n", i,
				     halt_info[i].pal_power_mgmt_info_s.entry_latency,
				     halt_info[i].pal_power_mgmt_info_s.exit_latency,
				     halt_info[i].pal_power_mgmt_info_s.power_consumption,
				     halt_info[i].pal_power_mgmt_info_s.co ? "Yes" : "No");
		} else {
			p += sprintf(p,"Power level %d: not implemented\n",i);
		}
	}
	return p - page;
}

static int
cache_info(char *page)
{
	char *p = page;
	unsigned long i, levels, unique_caches;
	pal_cache_config_info_t cci;
	int j, k;
	long status;

	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
		printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
		return 0;
	}

	p += sprintf(p, "Cache levels : %ld\nUnique caches : %ld\n\n", levels, unique_caches);

	for (i=0; i < levels; i++) {

		for (j=2; j >0 ; j--) {

			/* even without unification some level may not be present */
			if ((status=ia64_pal_cache_config_info(i,j, &cci)) != 0) {
				continue;
			}
			p += sprintf(p,
				     "%s Cache level %lu:\n"
				     "\tSize : %u bytes\n"
				     "\tAttributes : ",
				     cache_types[j+cci.pcci_unified], i+1,
				     cci.pcci_cache_size);

			if (cci.pcci_unified) p += sprintf(p, "Unified ");

			p += sprintf(p, "%s\n", cache_mattrib[cci.pcci_cache_attr]);

			p += sprintf(p,
				     "\tAssociativity : %d\n"
				     "\tLine size : %d bytes\n"
				     "\tStride : %d bytes\n",
				     cci.pcci_assoc, 1<<cci.pcci_line_size, 1<<cci.pcci_stride);
			if (j == 1)
				p += sprintf(p, "\tStore latency : N/A\n");
			else
				p += sprintf(p, "\tStore latency : %d cycle(s)\n",
					     cci.pcci_st_latency);

			p += sprintf(p,
				     "\tLoad latency : %d cycle(s)\n"
				     "\tStore hints : ", cci.pcci_ld_latency);

			for(k=0; k < 8; k++ ) {
				if ( cci.pcci_st_hints & 0x1)
					p += sprintf(p, "[%s]", cache_st_hints[k]);
				cci.pcci_st_hints >>=1;
			}
			p += sprintf(p, "\n\tLoad hints : ");

			for(k=0; k < 8; k++ ) {
				if (cci.pcci_ld_hints & 0x1)
					p += sprintf(p, "[%s]", cache_ld_hints[k]);
				cci.pcci_ld_hints >>=1;
			}
			p += sprintf(p,
				     "\n\tAlias boundary : %d byte(s)\n"
				     "\tTag LSB : %d\n"
				     "\tTag MSB : %d\n",
				     1<<cci.pcci_alias_boundary, cci.pcci_tag_lsb,
				     cci.pcci_tag_msb);

			/* when unified, data(j=2) is enough */
			if (cci.pcci_unified) break;
		}
	}
	return p - page;
}


static int
vm_info(char *page)
{
	char *p = page;
	u64 tr_pages =0, vw_pages=0, tc_pages;
	u64 attrib;
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	pal_tc_info_u_t tc_info;
	ia64_ptce_info_t ptce;
	const char *sep;
	int i, j;
	long status;

	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
	} else {

		p += sprintf(p,
			     "Physical Address Space : %d bits\n"
			     "Virtual Address Space : %d bits\n"
			     "Protection Key Registers(PKR) : %d\n"
			     "Implemented bits in PKR.key : %d\n"
			     "Hash Tag ID : 0x%x\n"
			     "Size of RR.rid : %d\n"
			     "Max Purges : ",
			     vm_info_1.pal_vm_info_1_s.phys_add_size,
			     vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
			     vm_info_1.pal_vm_info_1_s.max_pkr+1,
			     vm_info_1.pal_vm_info_1_s.key_size,
			     vm_info_1.pal_vm_info_1_s.hash_tag_id,
			     vm_info_2.pal_vm_info_2_s.rid_size);
		if (vm_info_2.pal_vm_info_2_s.max_purges == PAL_MAX_PURGES)
			p += sprintf(p, "unlimited\n");
		else
			p += sprintf(p, "%d\n",
				     vm_info_2.pal_vm_info_2_s.max_purges ?
				     vm_info_2.pal_vm_info_2_s.max_purges : 1);
	}

	if (ia64_pal_mem_attrib(&attrib) == 0) {
		p += sprintf(p, "Supported memory attributes : ");
		sep = "";
		for (i = 0; i < 8; i++) {
			if (attrib & (1 << i)) {
				p += sprintf(p, "%s%s", sep, mem_attrib[i]);
				sep = ", ";
			}
		}
		p += sprintf(p, "\n");
	}

	if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) {
		printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status);
	} else {

		p += sprintf(p,
			     "\nTLB walker : %simplemented\n"
			     "Number of DTR : %d\n"
			     "Number of ITR : %d\n"
			     "TLB insertable page sizes : ",
			     vm_info_1.pal_vm_info_1_s.vw ? "" : "not ",
			     vm_info_1.pal_vm_info_1_s.max_dtr_entry+1,
			     vm_info_1.pal_vm_info_1_s.max_itr_entry+1);


		p = bitvector_process(p, tr_pages);

		p += sprintf(p, "\nTLB purgeable page sizes : ");

		p = bitvector_process(p, vw_pages);
	}
	if ((status=ia64_get_ptce(&ptce)) != 0) {
		printk(KERN_ERR "ia64_get_ptce=%ld\n", status);
	} else {
		p += sprintf(p,
			     "\nPurge base address : 0x%016lx\n"
			     "Purge outer loop count : %d\n"
			     "Purge inner loop count : %d\n"
			     "Purge outer loop stride : %d\n"
			     "Purge inner loop stride : %d\n",
			     ptce.base, ptce.count[0], ptce.count[1],
			     ptce.stride[0], ptce.stride[1]);

		p += sprintf(p,
			     "TC Levels : %d\n"
			     "Unique TC(s) : %d\n",
			     vm_info_1.pal_vm_info_1_s.num_tc_levels,
			     vm_info_1.pal_vm_info_1_s.max_unique_tcs);

		for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) {
			for (j=2; j>0 ; j--) {
				tc_pages = 0; /* just in case */


				/* even without unification, some levels may not be present */
				if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) {
					continue;
				}

				p += sprintf(p,
					     "\n%s Translation Cache Level %d:\n"
					     "\tHash sets : %d\n"
					     "\tAssociativity : %d\n"
					     "\tNumber of entries : %d\n"
					     "\tFlags : ",
					     cache_types[j+tc_info.tc_unified], i+1,
					     tc_info.tc_num_sets,
					     tc_info.tc_associativity,
					     tc_info.tc_num_entries);

				if (tc_info.tc_pf)
					p += sprintf(p, "PreferredPageSizeOptimized ");
				if (tc_info.tc_unified)
					p += sprintf(p, "Unified ");
				if (tc_info.tc_reduce_tr)
					p += sprintf(p, "TCReduction");

				p += sprintf(p, "\n\tSupported page sizes: ");

				p = bitvector_process(p, tc_pages);

				/* when unified, data (j=2) is enough */
				if (tc_info.tc_unified)
					break;
			}
		}
	}
	p += sprintf(p, "\n");

	return p - page;
}


static int
register_info(char *page)
{
	char *p = page;
	u64 reg_info[2];
	u64 info;
	unsigned long phys_stacked;
	pal_hints_u_t hints;
	unsigned long iregs, dregs;
	static const char * const info_type[] = {
		"Implemented AR(s)",
		"AR(s) with read side-effects",
		"Implemented CR(s)",
		"CR(s) with read side-effects",
	};

	for(info=0; info < 4; info++) {

		if (ia64_pal_register_info(info, &reg_info[0], &reg_info[1]) != 0) return 0;

		p += sprintf(p, "%-32s : ", info_type[info]);

		p = bitregister_process(p, reg_info, 128);

		p += sprintf(p, "\n");
	}

	if (ia64_pal_rse_info(&phys_stacked, &hints) == 0) {

		p += sprintf(p,
			     "RSE stacked physical registers : %ld\n"
			     "RSE load/store hints : %ld (%s)\n",
			     phys_stacked, hints.ph_data,
			     hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)");
	}
	if (ia64_pal_debug_info(&iregs, &dregs))
		return 0;

	p += sprintf(p,
		     "Instruction debug register pairs : %ld\n"
		     "Data debug register pairs : %ld\n", iregs, dregs);

	return p - page;
}

static char *proc_features_0[]={	/* Feature set 0 */
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL,
	"Unimplemented instruction address fault",
	"INIT, PMI, and LINT pins",
	"Simple unimplemented instr addresses",
	"Variable P-state performance",
	"Virtual machine features implemented",
	"XIP,XPSR,XFS implemented",
	"XR1-XR3 implemented",
	"Disable dynamic predicate prediction",
	"Disable processor physical number",
	"Disable dynamic data cache prefetch",
	"Disable dynamic inst cache prefetch",
	"Disable dynamic branch prediction",
	NULL, NULL, NULL, NULL,
	"Disable P-states",
	"Enable MCA on Data Poisoning",
	"Enable vmsw instruction",
	"Enable extern environmental notification",
	"Disable BINIT on processor time-out",
	"Disable dynamic power management (DPM)",
	"Disable coherency",
	"Disable cache",
	"Enable CMCI promotion",
	"Enable MCA to BINIT promotion",
	"Enable MCA promotion",
	"Enable BERR promotion"
};

static char *proc_features_16[]={	/* Feature set 16 */
	"Disable ETM",
	"Enable ETM",
	"Enable MCA on half-way timer",
	"Enable snoop WC",
	NULL,
	"Enable Fast Deferral",
	"Disable MCA on memory aliasing",
	"Enable RSB",
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	"DP system processor",
	"Low Voltage",
	"HT supported",
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL
};

static char **proc_features[]={
	proc_features_0,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL,
	proc_features_16,
	NULL, NULL, NULL, NULL,
};
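
/*
 * Only feature sets 0 and 16 have name tables here; for any other
 * set, feature_set_info() below falls back to a generic
 * "Feature set N bit M" line for each available bit.
 */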

static char * feature_set_info(char *page, u64 avail, u64 status, u64 control,
			       unsigned long set)
{
	char *p = page;
	char **vf, **v;
	int i;

	vf = v = proc_features[set];
	for(i=0; i < 64; i++, avail >>=1, status >>=1, control >>=1) {

		if (!(control))		/* No remaining bits set */
			break;
		if (!(avail & 0x1))	/* Print only bits that are available */
			continue;
		if (vf)
			v = vf + i;
		if ( v && *v ) {
			p += sprintf(p, "%-40s : %s %s\n", *v,
				avail & 0x1 ? (status & 0x1 ?
					      "On " : "Off"): "",
				avail & 0x1 ? (control & 0x1 ?
						"Ctrl" : "NoCtrl"): "");
		} else {
			p += sprintf(p, "Feature set %2ld bit %2d\t\t\t"
					" : %s %s\n",
					set, i,
					avail & 0x1 ? (status & 0x1 ?
							"On " : "Off"): "",
					avail & 0x1 ? (control & 0x1 ?
							"Ctrl" : "NoCtrl"): "");
		}
	}
	return p;
}
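
/*
 * A named, available feature prints as, e.g.:
 *	"Disable BINIT on processor time-out     : Off NoCtrl"
 * i.e. the current status followed by whether the bit is
 * controllable ("Ctrl") or not ("NoCtrl").
 */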

static int
processor_info(char *page)
{
	char *p = page;
	u64 avail=1, status=1, control=1, feature_set=0;
	s64 ret;

	do {
		ret = ia64_pal_proc_get_features(&avail, &status, &control,
						feature_set);
		if (ret < 0) {
			return p - page;
		}
		if (ret == 1) {
			feature_set++;
			continue;
		}

		p = feature_set_info(p, avail, status, control, feature_set);

		feature_set++;
	} while(1);

	return p - page;
}

static const char *bus_features[]={
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,
	"Request Bus Parking",
	"Bus Lock Mask",
	"Enable Half Transfer",
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL,
	"Enable Cache Line Repl. Shared",
	"Enable Cache Line Repl. Exclusive",
	"Disable Transaction Queuing",
	"Disable Response Error Checking",
	"Disable Bus Error Checking",
	"Disable Bus Requester Internal Error Signalling",
	"Disable Bus Requester Error Signalling",
	"Disable Bus Initialization Event Checking",
	"Disable Bus Initialization Event Signalling",
	"Disable Bus Address Error Checking",
	"Disable Bus Address Error Signalling",
	"Disable Bus Data Error Checking"
};


static int
bus_info(char *page)
{
	char *p = page;
	const char **v = bus_features;
	pal_bus_features_u_t av, st, ct;
	u64 avail, status, control;
	int i;
	s64 ret;

	if ((ret=ia64_pal_bus_get_features(&av, &st, &ct)) != 0) return 0;

	avail = av.pal_bus_features_val;
	status = st.pal_bus_features_val;
	control = ct.pal_bus_features_val;

	for(i=0; i < 64; i++, v++, avail >>=1, status >>=1, control >>=1) {
		if ( ! *v ) continue;
		p += sprintf(p, "%-48s : %s%s %s\n", *v,
			     avail & 0x1 ? "" : "NotImpl",
			     avail & 0x1 ? (status & 0x1 ? "On" : "Off"): "",
			     avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl"): "");
	}
	return p - page;
}

static int
version_info(char *page)
{
	pal_version_u_t min_ver, cur_ver;
	char *p = page;

	if (ia64_pal_version(&min_ver, &cur_ver) != 0)
		return 0;

	p += sprintf(p,
		     "PAL_vendor : 0x%02x (min=0x%02x)\n"
		     "PAL_A : %02x.%02x (min=%02x.%02x)\n"
		     "PAL_B : %02x.%02x (min=%02x.%02x)\n",
		     cur_ver.pal_version_s.pv_pal_vendor,
		     min_ver.pal_version_s.pv_pal_vendor,
		     cur_ver.pal_version_s.pv_pal_a_model,
		     cur_ver.pal_version_s.pv_pal_a_rev,
		     min_ver.pal_version_s.pv_pal_a_model,
		     min_ver.pal_version_s.pv_pal_a_rev,
		     cur_ver.pal_version_s.pv_pal_b_model,
		     cur_ver.pal_version_s.pv_pal_b_rev,
		     min_ver.pal_version_s.pv_pal_b_model,
		     min_ver.pal_version_s.pv_pal_b_rev);
	return p - page;
}

static int
perfmon_info(char *page)
{
	char *p = page;
	u64 pm_buffer[16];
	pal_perf_mon_info_u_t pm_info;

	if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0) return 0;

	p += sprintf(p,
		     "PMC/PMD pairs : %d\n"
		     "Counter width : %d bits\n"
		     "Cycle event number : %d\n"
		     "Retired event number : %d\n"
		     "Implemented PMC : ",
		     pm_info.pal_perf_mon_info_s.generic, pm_info.pal_perf_mon_info_s.width,
		     pm_info.pal_perf_mon_info_s.cycles, pm_info.pal_perf_mon_info_s.retired);

	p = bitregister_process(p, pm_buffer, 256);
	p += sprintf(p, "\nImplemented PMD : ");
	p = bitregister_process(p, pm_buffer+4, 256);
	p += sprintf(p, "\nCycles count capable : ");
	p = bitregister_process(p, pm_buffer+8, 256);
	p += sprintf(p, "\nRetired bundles count capable : ");

#ifdef CONFIG_ITANIUM
	/*
	 * PAL_PERF_MON_INFO reports that only PMC4 can be used to count CPU_CYCLES
	 * which is wrong, both PMC4 and PMD5 support it.
	 */
	if (pm_buffer[12] == 0x10) pm_buffer[12]=0x30;
#endif

	p = bitregister_process(p, pm_buffer+12, 256);

	p += sprintf(p, "\n");

	return p - page;
}

static int
frequency_info(char *page)
{
	char *p = page;
	struct pal_freq_ratio proc, itc, bus;
	unsigned long base;

	if (ia64_pal_freq_base(&base) == -1)
		p += sprintf(p, "Output clock : not implemented\n");
	else
		p += sprintf(p, "Output clock : %ld ticks/s\n", base);

	if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0;

	p += sprintf(p,
		     "Processor/Clock ratio : %d/%d\n"
		     "Bus/Clock ratio : %d/%d\n"
		     "ITC/Clock ratio : %d/%d\n",
		     proc.num, proc.den, bus.num, bus.den, itc.num, itc.den);

	return p - page;
}

static int
tr_info(char *page)
{
	char *p = page;
	long status;
	pal_tr_valid_u_t tr_valid;
	u64 tr_buffer[4];
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	unsigned long i, j;
	unsigned long max[3], pgm;
	struct ifa_reg {
		unsigned long valid:1;
		unsigned long ig:11;
		unsigned long vpn:52;
	} *ifa_reg;
	struct itir_reg {
		unsigned long rv1:2;
		unsigned long ps:6;
		unsigned long key:24;
		unsigned long rv2:32;
	} *itir_reg;
	struct gr_reg {
		unsigned long p:1;
		unsigned long rv1:1;
		unsigned long ma:3;
		unsigned long a:1;
		unsigned long d:1;
		unsigned long pl:2;
		unsigned long ar:3;
		unsigned long ppn:38;
		unsigned long rv2:2;
		unsigned long ed:1;
		unsigned long ig:11;
	} *gr_reg;
	struct rid_reg {
		unsigned long ig1:1;
		unsigned long rv1:1;
		unsigned long ig2:6;
		unsigned long rid:24;
		unsigned long rv2:32;
	} *rid_reg;

	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
		return 0;
	}
	max[0] = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
	max[1] = vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;

	for (i=0; i < 2; i++ ) {
		for (j=0; j < max[i]; j++) {

			status = ia64_pal_tr_read(j, i, tr_buffer, &tr_valid);
			if (status != 0) {
				printk(KERN_ERR "palinfo: pal call failed on tr[%lu:%lu]=%ld\n",
				       i, j, status);
				continue;
			}

			ifa_reg = (struct ifa_reg *)&tr_buffer[2];

			if (ifa_reg->valid == 0) continue;

			gr_reg = (struct gr_reg *)tr_buffer;
			itir_reg = (struct itir_reg *)&tr_buffer[1];
			rid_reg = (struct rid_reg *)&tr_buffer[3];

			pgm = -1 << (itir_reg->ps - 12);
			p += sprintf(p,
				     "%cTR%lu: av=%d pv=%d dv=%d mv=%d\n"
				     "\tppn : 0x%lx\n"
				     "\tvpn : 0x%lx\n"
				     "\tps : ",
				     "ID"[i], j,
				     tr_valid.pal_tr_valid_s.access_rights_valid,
				     tr_valid.pal_tr_valid_s.priv_level_valid,
				     tr_valid.pal_tr_valid_s.dirty_bit_valid,
				     tr_valid.pal_tr_valid_s.mem_attr_valid,
				     (gr_reg->ppn & pgm)<< 12, (ifa_reg->vpn & pgm)<< 12);

			p = bitvector_process(p, 1<< itir_reg->ps);

			p += sprintf(p,
				     "\n\tpl : %d\n"
				     "\tar : %d\n"
				     "\trid : %x\n"
				     "\tp : %d\n"
				     "\tma : %d\n"
				     "\td : %d\n",
				     gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma,
				     gr_reg->d);
		}
	}
	return p - page;
}
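
/*
 * Note: the pgm mask (-1 << (ps - 12)) used above clears the sub-page
 * bits of the ppn/vpn fields for a 2^ps-byte translation, so the
 * printed ppn/vpn values are page-aligned base addresses.
 */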



/*
 * List {name,function} pairs for every entry in /proc/pal/cpu*
 */
static palinfo_entry_t palinfo_entries[]={
	{ "version_info",	version_info, },
	{ "vm_info",		vm_info, },
	{ "cache_info",		cache_info, },
	{ "power_info",		power_info, },
	{ "register_info",	register_info, },
	{ "processor_info",	processor_info, },
	{ "perfmon_info",	perfmon_info, },
	{ "frequency_info",	frequency_info, },
	{ "bus_info",		bus_info },
	{ "tr_info",		tr_info, }
};

#define NR_PALINFO_ENTRIES	(int) ARRAY_SIZE(palinfo_entries)

/*
 * This array is used to keep track of the proc entries we create. This is
 * required in module mode when we need to remove all entries. The procfs code
 * does not do recursive deletion.
 *
 * Notes:
 *	- +1 accounts for the cpuN directory entry in /proc/pal
 */
#define NR_PALINFO_PROC_ENTRIES	(NR_CPUS*(NR_PALINFO_ENTRIES+1))
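
/*
 * With the 10 entries in palinfo_entries[] above, each possible CPU
 * accounts for 11 slots (the cpuN directory plus one per proc file);
 * e.g. NR_CPUS=4 would reserve 44 pointers in palinfo_proc_entries[].
 */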

static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES];
static struct proc_dir_entry *palinfo_dir;

/*
 * This data structure is used to pass which cpu/function is being requested.
 * It must fit in a 64-bit quantity to be passed to the proc callback routine.
 *
 * In SMP mode, when we get a request for another CPU, we must call that
 * other CPU using IPI and wait for the result before returning.
 */
typedef union {
	u64 value;
	struct {
		unsigned	req_cpu: 32;	/* for which CPU this info is */
		unsigned	func_id: 32;	/* which function is requested */
	} pal_func_cpu;
} pal_func_cpu_u_t;

#define req_cpu	pal_func_cpu.req_cpu
#define func_id pal_func_cpu.func_id
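
/*
 * For example, a request for cache_info (palinfo_entries[2]) on CPU 1
 * is encoded as req_cpu=1, func_id=2; the packed .value is what gets
 * stored in the proc entry's data pointer by
 * create_palinfo_proc_entries() below.
 */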

#ifdef CONFIG_SMP

/*
 * used to hold information about the final function to call
 */
typedef struct {
	palinfo_func_t	func;	/* pointer to function to call */
	char		*page;	/* buffer to store results */
	int		ret;	/* return value from call */
} palinfo_smp_data_t;


/*
 * this function does the actual final call and is called
 * from the smp code, i.e., this is the palinfo callback routine
 */
static void
palinfo_smp_call(void *info)
{
	palinfo_smp_data_t *data = (palinfo_smp_data_t *)info;
	data->ret = (*data->func)(data->page);
}

/*
 * function called to trigger the IPI when we need to access a remote CPU
 * Return:
 *	0 : error or nothing to output
 *	otherwise how many bytes in the "page" buffer were written
 */
static
int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page)
{
	palinfo_smp_data_t ptr;
	int ret;

	ptr.func = palinfo_entries[f->func_id].proc_read;
	ptr.page = page;
	ptr.ret = 0; /* just in case */


	/* will send IPI to other CPU and wait for completion of remote call */
	if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 1))) {
		printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: "
		       "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret);
		return 0;
	}
	return ptr.ret;
}
#else /* ! CONFIG_SMP */
static
int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page)
{
	printk(KERN_ERR "palinfo: should not be called with non SMP kernel\n");
	return 0;
}
#endif /* CONFIG_SMP */

/*
 * Entry point routine: all calls go through this function
 */
static int
palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, void *data)
{
	int len=0;
	pal_func_cpu_u_t *f = (pal_func_cpu_u_t *)&data;

	/*
	 * in SMP mode, we may need to call another CPU to get correct
	 * information. PAL, by definition, is processor specific
	 */
	if (f->req_cpu == get_cpu())
		len = (*palinfo_entries[f->func_id].proc_read)(page);
	else
		len = palinfo_handle_smp(f, page);

	put_cpu();

	if (len <= off+count) *eof = 1;

	*start = page + off;
	len   -= off;

	if (len>count) len = count;
	if (len<0) len = 0;

	return len;
}
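
/*
 * For example, reading /proc/pal/cpu1/cache_info while running on
 * CPU 0 takes the palinfo_handle_smp() path above, which IPIs CPU 1
 * and waits for cache_info() to fill the page there.
 */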

static void __cpuinit
create_palinfo_proc_entries(unsigned int cpu)
{
#	define CPUSTR	"cpu%d"

	pal_func_cpu_u_t f;
	struct proc_dir_entry **pdir;
	struct proc_dir_entry *cpu_dir;
	int j;
	char cpustr[sizeof(CPUSTR)];


	/*
	 * we keep track of created entries in a depth-first order for
	 * cleanup purposes. Each entry is stored into palinfo_proc_entries
	 */
	sprintf(cpustr,CPUSTR, cpu);

	cpu_dir = proc_mkdir(cpustr, palinfo_dir);

	f.req_cpu = cpu;

	/*
	 * Compute the location to store per cpu entries.
	 * We don't store the top-level entry in this list, but
	 * remove it last, after removing all cpu entries.
	 */
	pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)];
	*pdir++ = cpu_dir;
	for (j=0; j < NR_PALINFO_ENTRIES; j++) {
		f.func_id = j;
		*pdir = create_proc_read_entry(
				palinfo_entries[j].name, 0, cpu_dir,
				palinfo_read_entry, (void *)f.value);
		pdir++;
	}
}
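
/*
 * Per-CPU slot layout in palinfo_proc_entries[]: index
 * cpu*(NR_PALINFO_ENTRIES+1) holds the cpuN directory itself,
 * followed by one slot per palinfo_entries[] file, in creation order.
 * remove_palinfo_proc_entries() below walks the same layout.
 */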

static void
remove_palinfo_proc_entries(unsigned int hcpu)
{
	int j;
	struct proc_dir_entry *cpu_dir, **pdir;

	pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)];
	cpu_dir = *pdir;
	*pdir++ = NULL;
	for (j=0; j < (NR_PALINFO_ENTRIES); j++) {
		if ((*pdir)) {
			remove_proc_entry ((*pdir)->name, cpu_dir);
			*pdir++ = NULL;
		}
	}

	if (cpu_dir) {
		remove_proc_entry(cpu_dir->name, palinfo_dir);
	}
}

static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		create_palinfo_proc_entries(hotcpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		remove_palinfo_proc_entries(hotcpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata palinfo_cpu_notifier =
{
	.notifier_call = palinfo_cpu_callback,
	.priority = 0,
};

static int __init
palinfo_init(void)
{
	int i = 0;

	printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);
	palinfo_dir = proc_mkdir("pal", NULL);

	/* Create palinfo dirs in /proc for all online cpus */
	for_each_online_cpu(i) {
		create_palinfo_proc_entries(i);
	}

	/* Register for future delivery via notify registration */
	register_hotcpu_notifier(&palinfo_cpu_notifier);

	return 0;
}

static void __exit
palinfo_exit(void)
{
	int i = 0;

	/* remove all nodes: depth first pass. Could optimize this */
	for_each_online_cpu(i) {
		remove_palinfo_proc_entries(i);
	}

	/*
	 * Remove the top level entry finally
	 */
	remove_proc_entry(palinfo_dir->name, NULL);

	/*
	 * Unregister from cpu notifier callbacks
	 */
	unregister_hotcpu_notifier(&palinfo_cpu_notifier);
}

module_init(palinfo_init);
module_exit(palinfo_exit);