1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* kernel/lockdep_proc.c
4
*
5
* Runtime locking correctness validator
6
*
7
* Started by Ingo Molnar:
8
*
9
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <[email protected]>
10
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
11
*
12
* Code for /proc/lockdep and /proc/lockdep_stats:
13
*
14
*/
15
#include <linux/export.h>
16
#include <linux/proc_fs.h>
17
#include <linux/seq_file.h>
18
#include <linux/kallsyms.h>
19
#include <linux/debug_locks.h>
20
#include <linux/vmalloc.h>
21
#include <linux/sort.h>
22
#include <linux/uaccess.h>
23
#include <asm/div64.h>
24
25
#include "lockdep_internals.h"
26
27
/*
 * Since iteration of lock_classes is done without holding the lockdep lock,
 * it is not safe to iterate all_lock_classes list directly as the iteration
 * may branch off to free_lock_classes or the zapped list. Iteration is done
 * directly on the lock_classes array by checking the lock_classes_in_use
 * bitmap and max_lock_class_idx.
 *
 * Callers must still test lock_classes_in_use for each idx; this macro only
 * walks the raw array slots.
 */
#define iterate_lock_classes(idx, class)				\
	for (idx = 0, class = lock_classes; idx <= max_lock_class_idx;	\
	     idx++, class++)
37
38
/*
 * seq_file ->next for /proc/lockdep: step to the following slot of the
 * lock_classes array, terminating once past max_lock_class_idx.
 */
static void *l_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct lock_class *next = (struct lock_class *)v + 1;

	*pos = next - lock_classes;
	if (*pos > max_lock_class_idx)
		return NULL;

	return next;
}
46
47
/*
 * seq_file ->start for /proc/lockdep: position *pos indexes directly into
 * the lock_classes array; out-of-range positions end the sequence.
 */
static void *l_start(struct seq_file *m, loff_t *pos)
{
	unsigned long idx = *pos;

	return (idx <= max_lock_class_idx) ? lock_classes + idx : NULL;
}
55
56
/* seq_file ->stop: nothing to release — iteration takes no locks here. */
static void l_stop(struct seq_file *m, void *v)
{
}
59
60
static void print_name(struct seq_file *m, struct lock_class *class)
61
{
62
char str[KSYM_NAME_LEN];
63
const char *name = class->name;
64
65
if (!name) {
66
name = __get_key_name(class->key, str);
67
seq_printf(m, "%s", name);
68
} else{
69
seq_printf(m, "%s", name);
70
if (class->name_version > 1)
71
seq_printf(m, "#%d", class->name_version);
72
if (class->subclass)
73
seq_printf(m, "/%d", class->subclass);
74
}
75
}
76
77
/*
 * seq_file ->show for /proc/lockdep: print one lock class — its key,
 * (optionally) op counts and dependency counts, usage characters, name,
 * and with PROVE_LOCKING its immediate forward dependencies.
 */
static int l_show(struct seq_file *m, void *v)
{
	struct lock_class *class = v;
	struct lock_list *entry;
	char usage[LOCK_USAGE_CHARS];
	int idx = class - lock_classes;

	/* Header line, emitted once when showing the first array slot. */
	if (v == lock_classes)
		seq_printf(m, "all lock classes:\n");

	/* Skip array slots that do not hold a live class. */
	if (!test_bit(idx, lock_classes_in_use))
		return 0;

	seq_printf(m, "%p", class->key);
#ifdef CONFIG_DEBUG_LOCKDEP
	seq_printf(m, " OPS:%8ld", debug_class_ops_read(class));
#endif
	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
		seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));

		get_usage_chars(class, usage);
		seq_printf(m, " %s", usage);
	}

	seq_printf(m, ": ");
	print_name(m, class);
	seq_puts(m, "\n");

	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		/* Only direct (distance == 1) dependencies are listed. */
		list_for_each_entry(entry, &class->locks_after, entry) {
			if (entry->distance == 1) {
				seq_printf(m, " -> [%p] ", entry->class->key);
				print_name(m, entry->class);
				seq_puts(m, "\n");
			}
		}
		seq_puts(m, "\n");
	}

	return 0;
}
119
120
/* seq_file operations backing /proc/lockdep. */
static const struct seq_operations lockdep_ops = {
	.start	= l_start,
	.next	= l_next,
	.stop	= l_stop,
	.show	= l_show,
};
126
127
#ifdef CONFIG_PROVE_LOCKING
128
/*
 * seq_file ->start for /proc/lockdep_chains.  Position 0 is the header
 * token; position N (N >= 1) maps to lock_chains[N - 1].
 */
static void *lc_start(struct seq_file *m, loff_t *pos)
{
	loff_t idx = *pos;

	if (idx < 0)
		return NULL;

	if (!idx)
		return SEQ_START_TOKEN;

	return lock_chains + (idx - 1);
}
138
139
/*
 * seq_file ->next for /proc/lockdep_chains.  Positions are offset by one
 * from chain indices (position 0 is the header), so translate to a chain
 * index, ask lockdep for the next in-use chain, and translate back.
 */
static void *lc_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = lockdep_next_lockchain(*pos - 1) + 1;
	return lc_start(m, pos);
}
144
145
/* seq_file ->stop: nothing to release. */
static void lc_stop(struct seq_file *m, void *v)
{
}
148
149
/*
 * seq_file ->show for /proc/lockdep_chains: print one lock chain — its
 * irq context and the classes held at each depth.
 */
static int lc_show(struct seq_file *m, void *v)
{
	struct lock_chain *chain = v;
	struct lock_class *class;
	int i;
	static const char * const irq_strs[] = {
		[0]			     = "0",
		[LOCK_CHAIN_HARDIRQ_CONTEXT] = "hardirq",
		[LOCK_CHAIN_SOFTIRQ_CONTEXT] = "softirq",
		[LOCK_CHAIN_SOFTIRQ_CONTEXT|
		 LOCK_CHAIN_HARDIRQ_CONTEXT] = "hardirq|softirq",
	};

	if (v == SEQ_START_TOKEN) {
		/* No free chain hlocks left: chain data may be incomplete. */
		if (!nr_free_chain_hlocks)
			seq_printf(m, "(buggered) ");
		seq_printf(m, "all lock chains:\n");
		return 0;
	}

	seq_printf(m, "irq_context: %s\n", irq_strs[chain->irq_context]);

	for (i = 0; i < chain->depth; i++) {
		class = lock_chain_get_class(chain, i);
		/* Skip entries whose class has been released/zapped. */
		if (!class->key)
			continue;

		seq_printf(m, "[%p] ", class->key);
		print_name(m, class);
		seq_puts(m, "\n");
	}
	seq_puts(m, "\n");

	return 0;
}
184
185
/* seq_file operations backing /proc/lockdep_chains. */
static const struct seq_operations lockdep_chains_ops = {
	.start	= lc_start,
	.next	= lc_next,
	.stop	= lc_stop,
	.show	= lc_show,
};
191
#endif /* CONFIG_PROVE_LOCKING */
192
193
/*
 * Print the CONFIG_DEBUG_LOCKDEP-only counters (chain lookup, graph walk
 * and irq-event statistics) into the lockdep_stats seq_file.  Compiles to
 * an empty function when DEBUG_LOCKDEP is off.
 */
static void lockdep_stats_debug_show(struct seq_file *m)
{
#ifdef CONFIG_DEBUG_LOCKDEP
	unsigned long long hi1 = debug_atomic_read(hardirqs_on_events),
			   hi2 = debug_atomic_read(hardirqs_off_events),
			   hr1 = debug_atomic_read(redundant_hardirqs_on),
			   hr2 = debug_atomic_read(redundant_hardirqs_off),
			   si1 = debug_atomic_read(softirqs_on_events),
			   si2 = debug_atomic_read(softirqs_off_events),
			   sr1 = debug_atomic_read(redundant_softirqs_on),
			   sr2 = debug_atomic_read(redundant_softirqs_off);

	seq_printf(m, " chain lookup misses:           %11llu\n",
		debug_atomic_read(chain_lookup_misses));
	seq_printf(m, " chain lookup hits:             %11llu\n",
		debug_atomic_read(chain_lookup_hits));
	seq_printf(m, " cyclic checks:                 %11llu\n",
		debug_atomic_read(nr_cyclic_checks));
	seq_printf(m, " redundant checks:              %11llu\n",
		debug_atomic_read(nr_redundant_checks));
	seq_printf(m, " redundant links:               %11llu\n",
		debug_atomic_read(nr_redundant));
	seq_printf(m, " find-mask forwards checks:     %11llu\n",
		debug_atomic_read(nr_find_usage_forwards_checks));
	seq_printf(m, " find-mask backwards checks:    %11llu\n",
		debug_atomic_read(nr_find_usage_backwards_checks));

	seq_printf(m, " hardirq on events:             %11llu\n", hi1);
	seq_printf(m, " hardirq off events:            %11llu\n", hi2);
	seq_printf(m, " redundant hardirq ons:         %11llu\n", hr1);
	seq_printf(m, " redundant hardirq offs:        %11llu\n", hr2);
	seq_printf(m, " softirq on events:             %11llu\n", si1);
	seq_printf(m, " softirq off events:            %11llu\n", si2);
	seq_printf(m, " redundant softirq ons:         %11llu\n", sr1);
	seq_printf(m, " redundant softirq offs:        %11llu\n", sr2);
#endif
}
230
231
/*
 * single_open show routine for /proc/lockdep_stats: classify every live
 * lock class by its usage mask, then print the aggregate lockdep counters.
 */
static int lockdep_stats_show(struct seq_file *m, void *v)
{
	unsigned long nr_unused = 0, nr_uncategorized = 0,
		      nr_irq_safe = 0, nr_irq_unsafe = 0,
		      nr_softirq_safe = 0, nr_softirq_unsafe = 0,
		      nr_hardirq_safe = 0, nr_hardirq_unsafe = 0,
		      nr_irq_read_safe = 0, nr_irq_read_unsafe = 0,
		      nr_softirq_read_safe = 0, nr_softirq_read_unsafe = 0,
		      nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
		      sum_forward_deps = 0;

#ifdef CONFIG_PROVE_LOCKING
	struct lock_class *class;
	unsigned long idx;

	/* Walk the class array; a class may match several categories. */
	iterate_lock_classes(idx, class) {
		if (!test_bit(idx, lock_classes_in_use))
			continue;

		if (class->usage_mask == 0)
			nr_unused++;
		if (class->usage_mask == LOCKF_USED)
			nr_uncategorized++;
		if (class->usage_mask & LOCKF_USED_IN_IRQ)
			nr_irq_safe++;
		if (class->usage_mask & LOCKF_ENABLED_IRQ)
			nr_irq_unsafe++;
		if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
			nr_softirq_safe++;
		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ)
			nr_softirq_unsafe++;
		if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
			nr_hardirq_safe++;
		if (class->usage_mask & LOCKF_ENABLED_HARDIRQ)
			nr_hardirq_unsafe++;
		if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
			nr_irq_read_safe++;
		if (class->usage_mask & LOCKF_ENABLED_IRQ_READ)
			nr_irq_read_unsafe++;
		if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
			nr_softirq_read_safe++;
		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ)
			nr_softirq_read_unsafe++;
		if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
			nr_hardirq_read_safe++;
		if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
			nr_hardirq_read_unsafe++;

		sum_forward_deps += lockdep_count_forward_deps(class);
	}

#ifdef CONFIG_DEBUG_LOCKDEP
	/* Cross-check our count against lockdep's own bookkeeping. */
	DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
#endif

#endif
	seq_printf(m, " lock-classes:                  %11lu [max: %lu]\n",
			nr_lock_classes, MAX_LOCKDEP_KEYS);
	seq_printf(m, " dynamic-keys:                  %11lu\n",
			nr_dynamic_keys);
	seq_printf(m, " direct dependencies:           %11lu [max: %lu]\n",
			nr_list_entries, MAX_LOCKDEP_ENTRIES);
	seq_printf(m, " indirect dependencies:         %11lu\n",
			sum_forward_deps);

	/*
	 * Total number of dependencies:
	 *
	 * All irq-safe locks may nest inside irq-unsafe locks,
	 * plus all the other known dependencies:
	 */
	seq_printf(m, " all direct dependencies:       %11lu\n",
			nr_irq_unsafe * nr_irq_safe +
			nr_hardirq_unsafe * nr_hardirq_safe +
			nr_list_entries);

#ifdef CONFIG_PROVE_LOCKING
	seq_printf(m, " dependency chains:             %11lu [max: %lu]\n",
			lock_chain_count(), MAX_LOCKDEP_CHAINS);
	seq_printf(m, " dependency chain hlocks used:  %11lu [max: %lu]\n",
			MAX_LOCKDEP_CHAIN_HLOCKS -
			(nr_free_chain_hlocks + nr_lost_chain_hlocks),
			MAX_LOCKDEP_CHAIN_HLOCKS);
	seq_printf(m, " dependency chain hlocks lost:  %11u\n",
			nr_lost_chain_hlocks);
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	seq_printf(m, " in-hardirq chains:             %11u\n",
			nr_hardirq_chains);
	seq_printf(m, " in-softirq chains:             %11u\n",
			nr_softirq_chains);
#endif
	seq_printf(m, " in-process chains:             %11u\n",
			nr_process_chains);
	seq_printf(m, " stack-trace entries:           %11lu [max: %lu]\n",
			nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
	seq_printf(m, " number of stack traces:        %11llu\n",
		   lockdep_stack_trace_count());
	seq_printf(m, " number of stack hash chains:   %11llu\n",
		   lockdep_stack_hash_count());
#endif
	seq_printf(m, " combined max dependencies:     %11u\n",
			(nr_hardirq_chains + 1) *
			(nr_softirq_chains + 1) *
			(nr_process_chains + 1)
	);
	seq_printf(m, " hardirq-safe locks:            %11lu\n",
			nr_hardirq_safe);
	seq_printf(m, " hardirq-unsafe locks:          %11lu\n",
			nr_hardirq_unsafe);
	seq_printf(m, " softirq-safe locks:            %11lu\n",
			nr_softirq_safe);
	seq_printf(m, " softirq-unsafe locks:          %11lu\n",
			nr_softirq_unsafe);
	seq_printf(m, " irq-safe locks:                %11lu\n",
			nr_irq_safe);
	seq_printf(m, " irq-unsafe locks:              %11lu\n",
			nr_irq_unsafe);

	seq_printf(m, " hardirq-read-safe locks:       %11lu\n",
			nr_hardirq_read_safe);
	seq_printf(m, " hardirq-read-unsafe locks:     %11lu\n",
			nr_hardirq_read_unsafe);
	seq_printf(m, " softirq-read-safe locks:       %11lu\n",
			nr_softirq_read_safe);
	seq_printf(m, " softirq-read-unsafe locks:     %11lu\n",
			nr_softirq_read_unsafe);
	seq_printf(m, " irq-read-safe locks:           %11lu\n",
			nr_irq_read_safe);
	seq_printf(m, " irq-read-unsafe locks:         %11lu\n",
			nr_irq_read_unsafe);

	seq_printf(m, " uncategorized locks:           %11lu\n",
			nr_uncategorized);
	seq_printf(m, " unused locks:                  %11lu\n",
			nr_unused);
	seq_printf(m, " max locking depth:             %11u\n",
			max_lockdep_depth);
#ifdef CONFIG_PROVE_LOCKING
	seq_printf(m, " max bfs queue depth:           %11u\n",
			max_bfs_queue_depth);
#endif
	seq_printf(m, " max lock class index:          %11lu\n",
			max_lock_class_idx);
	lockdep_stats_debug_show(m);
	seq_printf(m, " debug_locks:                   %11u\n",
			debug_locks);

	/*
	 * Zapped classes and lockdep data buffers reuse statistics.
	 */
	seq_puts(m, "\n");
	seq_printf(m, " zapped classes:                %11lu\n",
			nr_zapped_classes);
#ifdef CONFIG_PROVE_LOCKING
	seq_printf(m, " zapped lock chains:            %11lu\n",
			nr_zapped_lock_chains);
	seq_printf(m, " large chain blocks:            %11u\n",
			nr_large_chain_blocks);
#endif
	return 0;
}
395
396
#ifdef CONFIG_LOCK_STAT
397
398
/* Snapshot of one lock class together with its contention statistics. */
struct lock_stat_data {
	struct lock_class *class;
	struct lock_class_stats stats;
};

/*
 * Per-open private data for /proc/lock_stat: a fixed-size snapshot of all
 * classes, sorted at open time.  iter_end points one past the last valid
 * entry in stats[].
 */
struct lock_stat_seq {
	struct lock_stat_data *iter_end;
	struct lock_stat_data stats[MAX_LOCKDEP_KEYS];
};
407
408
/*
409
* sort on absolute number of contentions
410
*/
411
static int lock_stat_cmp(const void *l, const void *r)
412
{
413
const struct lock_stat_data *dl = l, *dr = r;
414
unsigned long nl, nr;
415
416
nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;
417
nr = dr->stats.read_waittime.nr + dr->stats.write_waittime.nr;
418
419
return nr - nl;
420
}
421
422
/* Emit 'offset' spaces, then 'length' copies of 'c', then a newline. */
static void seq_line(struct seq_file *m, char c, int offset, int length)
{
	int n;

	for (n = 0; n < offset; n++)
		seq_putc(m, ' ');
	for (n = 0; n < length; n++)
		seq_putc(m, c);
	seq_putc(m, '\n');
}
432
433
/*
 * Format a time value as "<whole>.<2 digits>" after dividing by 1000,
 * rounding the two-digit fraction to nearest (hence the +5 before the
 * divide, since the last digit of the remainder is discarded).
 */
static void snprint_time(char *buf, size_t bufsiz, s64 nr)
{
	s64 whole;
	s32 frac;

	nr += 5;	/* for display rounding */
	whole = div_s64_rem(nr, 1000, &frac);
	snprintf(buf, bufsiz, "%lld.%02d", (long long)whole, (int)frac / 10);
}
442
443
/* Print one time value right-aligned in a 14-character column. */
static void seq_time(struct seq_file *m, s64 time)
{
	char buf[22];

	snprint_time(buf, sizeof(buf), time);
	seq_printf(m, " %14s", buf);
}
450
451
/* Print one lock_time record: count, min, max, total and average. */
static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
{
	seq_printf(m, "%14lu", lt->nr);
	seq_time(m, lt->min);
	seq_time(m, lt->max);
	seq_time(m, lt->total);
	/* Guard the average against division by zero. */
	seq_time(m, lt->nr ? div64_u64(lt->total, lt->nr) : 0);
}
459
460
/*
 * Print the full /proc/lock_stat record for one class: the -W/-R summary
 * rows followed by the per-callsite contention/contending point tables.
 *
 * The name is built into a local buffer whose budget (38 chars) is
 * pre-shrunk to leave room for the "#<ver>" and "/<subclass>" suffixes
 * appended below; versions/subclasses > 9 would be truncated (see XXX).
 */
static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
{
	const struct lockdep_subclass_key *ckey;
	struct lock_class_stats *stats;
	struct lock_class *class;
	const char *cname;
	int i, namelen;
	char name[39];

	class = data->class;
	stats = &data->stats;

	namelen = 38;
	if (class->name_version > 1)
		namelen -= 2; /* XXX truncates versions > 9 */
	if (class->subclass)
		namelen -= 2;

	/* ->name and ->key are RCU-published; sample them under the lock. */
	rcu_read_lock_sched();
	cname = rcu_dereference_sched(class->name);
	ckey  = rcu_dereference_sched(class->key);

	if (!cname && !ckey) {
		/* Class is being torn down: nothing sensible to print. */
		rcu_read_unlock_sched();
		return;

	} else if (!cname) {
		char str[KSYM_NAME_LEN];
		const char *key_name;

		key_name = __get_key_name(ckey, str);
		snprintf(name, namelen, "%s", key_name);
	} else {
		snprintf(name, namelen, "%s", cname);
	}
	rcu_read_unlock_sched();

	/* Append "#<version>" and "/<subclass>" suffixes reserved above. */
	namelen = strlen(name);
	if (class->name_version > 1) {
		snprintf(name+namelen, 3, "#%d", class->name_version);
		namelen += 2;
	}
	if (class->subclass) {
		snprintf(name+namelen, 3, "/%d", class->subclass);
		namelen += 2;
	}

	if (stats->write_holdtime.nr) {
		/* "-W" suffix only needed when a "-R" row will follow. */
		if (stats->read_holdtime.nr)
			seq_printf(m, "%38s-W:", name);
		else
			seq_printf(m, "%40s:", name);

		seq_printf(m, "%14lu ", stats->bounces[bounce_contended_write]);
		seq_lock_time(m, &stats->write_waittime);
		seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_write]);
		seq_lock_time(m, &stats->write_holdtime);
		seq_puts(m, "\n");
	}

	if (stats->read_holdtime.nr) {
		seq_printf(m, "%38s-R:", name);
		seq_printf(m, "%14lu ", stats->bounces[bounce_contended_read]);
		seq_lock_time(m, &stats->read_waittime);
		seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_read]);
		seq_lock_time(m, &stats->read_holdtime);
		seq_puts(m, "\n");
	}

	/* No contentions at all: skip the callsite tables entirely. */
	if (stats->read_waittime.nr + stats->write_waittime.nr == 0)
		return;

	/* Account for the "-W"/"-R" suffix in the underline width. */
	if (stats->read_holdtime.nr)
		namelen += 2;

	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		char ip[32];

		if (class->contention_point[i] == 0)
			break;

		/* Underline the name before the first table row. */
		if (!i)
			seq_line(m, '-', 40-namelen, namelen);

		snprintf(ip, sizeof(ip), "[<%p>]",
				(void *)class->contention_point[i]);
		seq_printf(m, "%40s %14lu %29s %pS\n",
			   name, stats->contention_point[i],
			   ip, (void *)class->contention_point[i]);
	}
	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		char ip[32];

		if (class->contending_point[i] == 0)
			break;

		if (!i)
			seq_line(m, '-', 40-namelen, namelen);

		snprintf(ip, sizeof(ip), "[<%p>]",
				(void *)class->contending_point[i]);
		seq_printf(m, "%40s %14lu %29s %pS\n",
			   name, stats->contending_point[i],
			   ip, (void *)class->contending_point[i]);
	}
	if (i) {
		seq_puts(m, "\n");
		seq_line(m, '.', 0, 40 + 1 + 12 * (14 + 1));
		seq_puts(m, "\n");
	}
}
571
572
/*
 * Print the /proc/lock_stat banner: version, an optional warning when
 * lock debugging has been disabled, and the 13-column table header.
 */
static void seq_header(struct seq_file *m)
{
	seq_puts(m, "lock_stat version 0.4\n");

	if (unlikely(!debug_locks))
		seq_printf(m, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");

	seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
	seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s %14s %14s "
			"%14s %14s\n",
			"class name",
			"con-bounces",
			"contentions",
			"waittime-min",
			"waittime-max",
			"waittime-total",
			"waittime-avg",
			"acq-bounces",
			"acquisitions",
			"holdtime-min",
			"holdtime-max",
			"holdtime-total",
			"holdtime-avg");
	seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
	seq_printf(m, "\n");
}
598
599
/*
 * seq_file ->start for /proc/lock_stat.  Position 0 is the header token;
 * position N (N >= 1) maps to stats[N - 1] of the per-open snapshot.
 */
static void *ls_start(struct seq_file *m, loff_t *pos)
{
	struct lock_stat_seq *data = m->private;
	struct lock_stat_data *iter;

	if (!*pos)
		return SEQ_START_TOKEN;

	iter = &data->stats[*pos - 1];
	return (iter < data->iter_end) ? iter : NULL;
}
613
614
/* seq_file ->next: bump the position and re-resolve via ls_start(). */
static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return ls_start(m, pos);
}
619
620
/* seq_file ->stop: nothing to release. */
static void ls_stop(struct seq_file *m, void *v)
{
}
623
624
static int ls_show(struct seq_file *m, void *v)
625
{
626
if (v == SEQ_START_TOKEN)
627
seq_header(m);
628
else
629
seq_stats(m, v);
630
631
return 0;
632
}
633
634
/* seq_file operations backing /proc/lock_stat. */
static const struct seq_operations lockstat_ops = {
	.start	= ls_start,
	.next	= ls_next,
	.stop	= ls_stop,
	.show	= ls_show,
};
640
641
/*
 * open() for /proc/lock_stat: snapshot the statistics of every live lock
 * class into a vmalloc'ed lock_stat_seq, sort it by contention count, and
 * stash it as the seq_file's private data (freed in lock_stat_release()).
 */
static int lock_stat_open(struct inode *inode, struct file *file)
{
	int res;
	struct lock_class *class;
	struct lock_stat_seq *data = vmalloc(sizeof(struct lock_stat_seq));

	if (!data)
		return -ENOMEM;

	res = seq_open(file, &lockstat_ops);
	if (!res) {
		struct lock_stat_data *iter = data->stats;
		struct seq_file *m = file->private_data;
		unsigned long idx;

		iterate_lock_classes(idx, class) {
			if (!test_bit(idx, lock_classes_in_use))
				continue;
			iter->class = class;
			lock_stats(class, &iter->stats);
			iter++;
		}

		/* One past the last populated entry. */
		data->iter_end = iter;

		sort(data->stats, data->iter_end - data->stats,
				sizeof(struct lock_stat_data),
				lock_stat_cmp, NULL);

		m->private = data;
	} else
		/* seq_open() failed: we still own the buffer. */
		vfree(data);

	return res;
}
676
677
/*
 * write() for /proc/lock_stat: writing a string starting with '0' clears
 * the statistics of every live lock class; anything else is accepted and
 * ignored.  Always reports the full count as consumed.
 */
static ssize_t lock_stat_write(struct file *file, const char __user *buf,
			       size_t count, loff_t *ppos)
{
	struct lock_class *class;
	unsigned long idx;
	char c;

	if (count) {
		/* Only the first byte of the write is examined. */
		if (get_user(c, buf))
			return -EFAULT;

		if (c != '0')
			return count;

		iterate_lock_classes(idx, class) {
			if (!test_bit(idx, lock_classes_in_use))
				continue;
			clear_lock_stats(class);
		}
	}
	return count;
}
699
700
/*
 * release() for /proc/lock_stat: free the snapshot allocated in
 * lock_stat_open() before tearing down the seq_file.
 */
static int lock_stat_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;

	vfree(seq->private);
	return seq_release(inode, file);
}
707
708
/* proc_ops for /proc/lock_stat (readable stats, writable reset knob). */
static const struct proc_ops lock_stat_proc_ops = {
	.proc_open	= lock_stat_open,
	.proc_write	= lock_stat_write,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= lock_stat_release,
};
715
#endif /* CONFIG_LOCK_STAT */
716
717
/*
 * Create the lockdep /proc entries at boot.  Registration failures are
 * deliberately ignored: the proc files are diagnostics only.
 */
static int __init lockdep_proc_init(void)
{
	proc_create_seq("lockdep", S_IRUSR, NULL, &lockdep_ops);
#ifdef CONFIG_PROVE_LOCKING
	proc_create_seq("lockdep_chains", S_IRUSR, NULL, &lockdep_chains_ops);
#endif
	proc_create_single("lockdep_stats", S_IRUSR, NULL, lockdep_stats_show);
#ifdef CONFIG_LOCK_STAT
	proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL, &lock_stat_proc_ops);
#endif

	return 0;
}
730
731
__initcall(lockdep_proc_init);
732
733
734