// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - debugfs
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <[email protected]>

#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/list.h>

#include "internal.h"

struct regmap_debugfs_node {
	struct regmap *map;
	struct list_head link;
};

static unsigned int dummy_index;
static struct dentry *regmap_debugfs_root;
static LIST_HEAD(regmap_debugfs_early_list);
static DEFINE_MUTEX(regmap_debugfs_early_lock);

/* Calculate the width needed to print the largest register address in hex */
static size_t regmap_calc_reg_len(int max_val)
{
	return snprintf(NULL, 0, "%x", max_val);
}

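/*
 * Read handler for the "name" debugfs file: reports the name of the
 * driver bound to the regmap's device, or "nodev" if there is none.
 */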
static ssize_t regmap_name_read_file(struct file *file,
				     char __user *user_buf, size_t count,
				     loff_t *ppos)
{
	struct regmap *map = file->private_data;
	const char *name = "nodev";
	int ret;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (map->dev && map->dev->driver)
		name = map->dev->driver->name;

	ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
	if (ret >= PAGE_SIZE) {
		kfree(buf);
		return ret;
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}

static const struct file_operations regmap_name_fops = {
	.open = simple_open,
	.read = regmap_name_read_file,
	.llseek = default_llseek,
};

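/*
 * Drop every entry in the register dump offset cache.  Called with
 * map->cache_lock held.
 */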
static void regmap_debugfs_free_dump_cache(struct regmap *map)
{
	struct regmap_debugfs_off_cache *c;

	while (!list_empty(&map->debugfs_off_cache)) {
		c = list_first_entry(&map->debugfs_off_cache,
				     struct regmap_debugfs_off_cache,
				     list);
		list_del(&c->list);
		kfree(c);
	}
}

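/*
 * A register appears in the debugfs dump if it is not precious and its
 * value can be obtained either from the hardware or from the register
 * cache.
 */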
static bool regmap_printable(struct regmap *map, unsigned int reg)
{
	if (regmap_precious(map, reg))
		return false;

	if (!regmap_readable(map, reg) && !regmap_cached(map, reg))
		return false;

	return true;
}

/*
 * Work out where the start offset maps into register numbers, bearing
 * in mind that we suppress hidden registers.
 */
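/*
 * The debugfs_off_cache list records one entry per contiguous block of
 * printable registers, mapping byte offsets in the fixed-format dump
 * back to register numbers so that seeks don't need a linear scan.
 */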
static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
						  unsigned int base,
						  loff_t from,
						  loff_t *pos)
{
	struct regmap_debugfs_off_cache *c = NULL;
	loff_t p = 0;
	unsigned int i, ret;
	unsigned int fpos_offset;
	unsigned int reg_offset;

	/* Suppress the cache if we're using a subrange */
	if (base)
		return base;

	/*
	 * If we don't have a cache build one so we don't have to do a
	 * linear scan each time.
	 */
	mutex_lock(&map->cache_lock);
	i = base;
	if (list_empty(&map->debugfs_off_cache)) {
		for (; i <= map->max_register; i += map->reg_stride) {
			/* Skip unprinted registers, closing off cache entry */
			if (!regmap_printable(map, i)) {
				if (c) {
					c->max = p - 1;
					c->max_reg = i - map->reg_stride;
					list_add_tail(&c->list,
						      &map->debugfs_off_cache);
					c = NULL;
				}

				continue;
			}

			/* No cache entry? Start a new one */
			if (!c) {
				c = kzalloc(sizeof(*c), GFP_KERNEL);
				if (!c) {
					regmap_debugfs_free_dump_cache(map);
					mutex_unlock(&map->cache_lock);
					return base;
				}
				c->min = p;
				c->base_reg = i;
			}

			p += map->debugfs_tot_len;
		}
	}

	/* Close the last entry off if we didn't scan beyond it */
	if (c) {
		c->max = p - 1;
		c->max_reg = i - map->reg_stride;
		list_add_tail(&c->list,
			      &map->debugfs_off_cache);
	}

	/*
	 * This should never happen; we return above if we fail to
	 * allocate and we should never be in this code if there are
	 * no registers at all.
	 */
	WARN_ON(list_empty(&map->debugfs_off_cache));
	ret = base;

	/* Find the relevant block:offset */
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		if (from >= c->min && from <= c->max) {
			fpos_offset = from - c->min;
			reg_offset = fpos_offset / map->debugfs_tot_len;
			*pos = c->min + (reg_offset * map->debugfs_tot_len);
			mutex_unlock(&map->cache_lock);
			return c->base_reg + (reg_offset * map->reg_stride);
		}

		*pos = c->max;
		ret = c->max_reg;
	}
	mutex_unlock(&map->cache_lock);

	return ret;
}

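/*
 * Lazily work out how long one formatted "register: value" line is;
 * the buf and count arguments are currently unused.
 */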
static inline void regmap_calc_tot_len(struct regmap *map,
				       void *buf, size_t count)
{
	/* Calculate the length of a fixed format */
	if (!map->debugfs_tot_len) {
		map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
		map->debugfs_val_len = 2 * map->format.val_bytes;
		map->debugfs_tot_len = map->debugfs_reg_len +
			map->debugfs_val_len + 3;	/* : \n */
	}
}

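/*
 * Return the next printable register after @reg, consulting the offset
 * cache to skip over suppressed holes, or -EINVAL past the last one.
 */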
static int regmap_next_readable_reg(struct regmap *map, int reg)
{
	struct regmap_debugfs_off_cache *c;
	int ret = -EINVAL;

	if (regmap_printable(map, reg + map->reg_stride)) {
		ret = reg + map->reg_stride;
	} else {
		mutex_lock(&map->cache_lock);
		list_for_each_entry(c, &map->debugfs_off_cache, list) {
			if (reg > c->max_reg)
				continue;
			if (reg < c->base_reg) {
				ret = c->base_reg;
				break;
			}
		}
		mutex_unlock(&map->cache_lock);
	}
	return ret;
}

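/*
 * Core dump routine shared by the "registers" file and the per-range
 * files: formats each printable register between @from and @to as a
 * fixed-width "address: value" line, honouring the file position.
 */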
static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
				   unsigned int to, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	size_t buf_pos = 0;
	loff_t p = *ppos;
	ssize_t ret;
	int i;
	char *buf;
	unsigned int val, start_reg;

	if (*ppos < 0 || !count)
		return -EINVAL;

	if (count > (PAGE_SIZE << MAX_PAGE_ORDER))
		count = PAGE_SIZE << MAX_PAGE_ORDER;

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	regmap_calc_tot_len(map, buf, count);

	/* Work out which register we're starting at */
	start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);

	for (i = start_reg; i >= 0 && i <= to;
	     i = regmap_next_readable_reg(map, i)) {

		/* If we're in the region the user is trying to read */
		if (p >= *ppos) {
			/* ...but not beyond it */
			if (buf_pos + map->debugfs_tot_len > count)
				break;

			/* Format the register */
			snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
				 map->debugfs_reg_len, i - from);
			buf_pos += map->debugfs_reg_len + 2;

			/* Format the value, write all X if we can't read */
			ret = regmap_read(map, i, &val);
			if (ret == 0)
				snprintf(buf + buf_pos, count - buf_pos,
					 "%.*x", map->debugfs_val_len, val);
			else
				memset(buf + buf_pos, 'X',
				       map->debugfs_val_len);
			buf_pos += 2 * map->format.val_bytes;

			buf[buf_pos++] = '\n';
		}
		p += map->debugfs_tot_len;
	}

	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out;
	}

	*ppos += buf_pos;

out:
	kfree(buf);
	return ret;
}

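/* Read handler for the "registers" file: dump the whole register map */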
static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct regmap *map = file->private_data;

	return regmap_read_debugfs(map, 0, map->max_register, user_buf,
				   count, ppos);
}

#undef REGMAP_ALLOW_WRITE_DEBUGFS
#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
/*
 * This can be dangerous, especially when we have clients such as
 * PMICs, so no real compile-time configuration option is provided
 * for this feature; people who want to use it will need to modify
 * the source code directly.
 */
static ssize_t regmap_map_write_file(struct file *file,
				     const char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;
	char *start = buf;
	unsigned long reg, value;
	struct regmap *map = file->private_data;
	int ret;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	buf[buf_size] = 0;

	while (*start == ' ')
		start++;
	reg = simple_strtoul(start, &start, 16);
	while (*start == ' ')
		start++;
	if (kstrtoul(start, 16, &value))
		return -EINVAL;

	/* Userspace has been fiddling around behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	ret = regmap_write(map, reg, value);
	if (ret < 0)
		return ret;
	return buf_size;
}
#else
#define regmap_map_write_file NULL
#endif

static const struct file_operations regmap_map_fops = {
	.open = simple_open,
	.read = regmap_map_read_file,
	.write = regmap_map_write_file,
	.llseek = default_llseek,
};

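/*
 * Read handler for a named register range file: same fixed format as
 * the "registers" file, restricted to the range's registers.
 */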
static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct regmap_range_node *range = file->private_data;
	struct regmap *map = range->map;

	return regmap_read_debugfs(map, range->range_min, range->range_max,
				   user_buf, count, ppos);
}

static const struct file_operations regmap_range_fops = {
	.open = simple_open,
	.read = regmap_range_read_file,
	.llseek = default_llseek,
};

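/*
 * Read handler for the "range" file: list each contiguous block of
 * printable registers as a "base-max\n" line.
 */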
static ssize_t regmap_reg_ranges_read_file(struct file *file,
					   char __user *user_buf, size_t count,
					   loff_t *ppos)
{
	struct regmap *map = file->private_data;
	struct regmap_debugfs_off_cache *c;
	loff_t p = 0;
	size_t buf_pos = 0;
	char *buf;
	char *entry;
	int ret;
	unsigned int entry_len;

	if (*ppos < 0 || !count)
		return -EINVAL;

	if (count > (PAGE_SIZE << MAX_PAGE_ORDER))
		count = PAGE_SIZE << MAX_PAGE_ORDER;

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!entry) {
		kfree(buf);
		return -ENOMEM;
	}

	/*
	 * While we are at it, build the register dump cache
	 * now so the read() operation on the `registers' file
	 * can benefit from using the cache.  We do not care
	 * about the file position information that is contained
	 * in the cache, just about the actual register blocks.
	 */
	regmap_calc_tot_len(map, buf, count);
	regmap_debugfs_get_dump_start(map, 0, *ppos, &p);

	/*
	 * Reset the file position as the fixed format of the `registers'
	 * file is not compatible with the `range' file.
	 */
	p = 0;
	mutex_lock(&map->cache_lock);
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
				     c->base_reg, c->max_reg);
		if (p >= *ppos) {
			if (buf_pos + entry_len > count)
				break;
			memcpy(buf + buf_pos, entry, entry_len);
			buf_pos += entry_len;
		}
		p += entry_len;
	}
	mutex_unlock(&map->cache_lock);

	kfree(entry);
	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out_buf;
	}

	*ppos += buf_pos;
out_buf:
	kfree(buf);
	return ret;
}

static const struct file_operations regmap_reg_ranges_fops = {
	.open = simple_open,
	.read = regmap_reg_ranges_read_file,
	.llseek = default_llseek,
};

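/*
 * Show handler for the "access" file: one line per register with y/n
 * columns for readable, writeable, volatile and precious.
 */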
static int regmap_access_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	int i, reg_len;

	reg_len = regmap_calc_reg_len(map->max_register);

	for (i = 0; i <= map->max_register; i += map->reg_stride) {
		/* Ignore registers which are neither readable nor writable */
		if (!regmap_readable(map, i) && !regmap_writeable(map, i))
			continue;

		/* Format the register */
		seq_printf(s, "%.*x: %c %c %c %c\n", reg_len, i,
			   regmap_readable(map, i) ? 'y' : 'n',
			   regmap_writeable(map, i) ? 'y' : 'n',
			   regmap_volatile(map, i) ? 'y' : 'n',
			   regmap_precious(map, i) ? 'y' : 'n');
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(regmap_access);

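/*
 * Write handler for the "cache_only" file.  Enabling cache_only taints
 * the kernel; disabling it triggers a cache sync to push any values
 * written while it was set out to the hardware.
 */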
static ssize_t regmap_cache_only_write_file(struct file *file,
					    const char __user *user_buf,
					    size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_only);
	bool new_val, require_sync = false;
	int err;

	err = kstrtobool_from_user(user_buf, count, &new_val);
	/* Ignore malformed data like debugfs_write_file_bool() */
	if (err)
		return count;

	map->lock(map->lock_arg);

	if (new_val && !map->cache_only) {
		dev_warn(map->dev, "debugfs cache_only=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!new_val && map->cache_only) {
		dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
		require_sync = true;
	}
	map->cache_only = new_val;

	map->unlock(map->lock_arg);

	if (require_sync) {
		err = regcache_sync(map);
		if (err)
			dev_err(map->dev, "Failed to sync cache %d\n", err);
	}

	return count;
}

static const struct file_operations regmap_cache_only_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_only_write_file,
};

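/*
 * Write handler for the "cache_bypass" file.  Enabling bypass taints
 * the kernel; accesses then go straight to the hardware, bypassing
 * the register cache.
 */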
static ssize_t regmap_cache_bypass_write_file(struct file *file,
					      const char __user *user_buf,
					      size_t count, loff_t *ppos)
{
	struct regmap *map = container_of(file->private_data,
					  struct regmap, cache_bypass);
	bool new_val;
	int err;

	err = kstrtobool_from_user(user_buf, count, &new_val);
	/* Ignore malformed data like debugfs_write_file_bool() */
	if (err)
		return count;

	map->lock(map->lock_arg);

	if (new_val && !map->cache_bypass) {
		dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	} else if (!new_val && map->cache_bypass) {
		dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
	}
	map->cache_bypass = new_val;

	map->unlock(map->lock_arg);

	return count;
}

static const struct file_operations regmap_cache_bypass_fops = {
	.open = simple_open,
	.read = debugfs_read_file_bool,
	.write = regmap_cache_bypass_write_file,
};

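/*
 * Create the per-map debugfs directory and its files.  If the debugfs
 * root has not been created yet, the map is queued on an early list
 * and initialised later from regmap_debugfs_initcall().
 */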
void regmap_debugfs_init(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;
	const char *devname = "dummy";
	const char *name = map->name;

	/*
	 * Userspace can initiate reads from the hardware over debugfs.
	 * Normally internal regmap structures and buffers are protected with
	 * a mutex or a spinlock, but if the regmap owner decided to disable
	 * all locking mechanisms, this is no longer the case.  For safety:
	 * don't create the debugfs entries if locking is disabled.
	 */
	if (map->debugfs_disable) {
		dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n");
		return;
	}

	/* If we don't have the debugfs root yet, postpone init */
	if (!regmap_debugfs_root) {
		struct regmap_debugfs_node *node;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return;
		node->map = map;
		mutex_lock(&regmap_debugfs_early_lock);
		list_add(&node->link, &regmap_debugfs_early_list);
		mutex_unlock(&regmap_debugfs_early_lock);
		return;
	}

	INIT_LIST_HEAD(&map->debugfs_off_cache);
	mutex_init(&map->cache_lock);

	if (map->dev)
		devname = dev_name(map->dev);

	if (name) {
		if (!map->debugfs_name) {
			map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
						      devname, name);
			if (!map->debugfs_name)
				return;
		}
		name = map->debugfs_name;
	} else {
		name = devname;
	}

	if (!strcmp(name, "dummy")) {
		kfree(map->debugfs_name);
		map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
					      dummy_index);
		if (!map->debugfs_name)
			return;
		name = map->debugfs_name;
		dummy_index++;
	}

	map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);

	debugfs_create_file("name", 0400, map->debugfs,
			    map, &regmap_name_fops);

	debugfs_create_file("range", 0400, map->debugfs,
			    map, &regmap_reg_ranges_fops);

	if (map->max_register || regmap_readable(map, 0)) {
		umode_t registers_mode;

#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
		registers_mode = 0600;
#else
		registers_mode = 0400;
#endif

		debugfs_create_file("registers", registers_mode, map->debugfs,
				    map, &regmap_map_fops);
		debugfs_create_file("access", 0400, map->debugfs,
				    map, &regmap_access_fops);
	}

	if (map->cache_type) {
		debugfs_create_file("cache_only", 0600, map->debugfs,
				    &map->cache_only, &regmap_cache_only_fops);
		debugfs_create_bool("cache_dirty", 0400, map->debugfs,
				    &map->cache_dirty);
		debugfs_create_file("cache_bypass", 0600, map->debugfs,
				    &map->cache_bypass,
				    &regmap_cache_bypass_fops);
	}

	/*
	 * This could interfere with driver operation.  Therefore, don't
	 * provide any real compile time configuration option for this
	 * feature.  One will have to modify the source code directly in
	 * order to use it.
	 */
#undef REGMAP_ALLOW_FORCE_WRITE_FIELD_DEBUGFS
#ifdef REGMAP_ALLOW_FORCE_WRITE_FIELD_DEBUGFS
	debugfs_create_bool("force_write_field", 0600, map->debugfs,
			    &map->force_write_field);
#endif

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);

		if (range_node->name)
			debugfs_create_file(range_node->name, 0400,
					    map->debugfs, range_node,
					    &regmap_range_fops);

		next = rb_next(&range_node->node);
	}

	if (map->cache_ops && map->cache_ops->debugfs_init)
		map->cache_ops->debugfs_init(map);
}

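/*
 * Tear down the per-map debugfs entries, or drop the map from the
 * early list if debugfs was never initialised for it.
 */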
void regmap_debugfs_exit(struct regmap *map)
{
	if (map->debugfs) {
		debugfs_remove_recursive(map->debugfs);
		mutex_lock(&map->cache_lock);
		regmap_debugfs_free_dump_cache(map);
		mutex_unlock(&map->cache_lock);
		kfree(map->debugfs_name);
		map->debugfs_name = NULL;
	} else {
		struct regmap_debugfs_node *node, *tmp;

		mutex_lock(&regmap_debugfs_early_lock);
		list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list,
					 link) {
			if (node->map == map) {
				list_del(&node->link);
				kfree(node);
			}
		}
		mutex_unlock(&regmap_debugfs_early_lock);
	}
}

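/*
 * Called once debugfs is available: create the "regmap" root directory
 * and initialise any maps that registered before it existed.
 */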
void regmap_debugfs_initcall(void)
{
	struct regmap_debugfs_node *node, *tmp;

	regmap_debugfs_root = debugfs_create_dir("regmap", NULL);

	mutex_lock(&regmap_debugfs_early_lock);
	list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
		regmap_debugfs_init(node->map);
		list_del(&node->link);
		kfree(node);
	}
	mutex_unlock(&regmap_debugfs_early_lock);
}