// SPDX-License-Identifier: GPL-2.0
//
// Register map access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <[email protected]>

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/property.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>
#include <linux/unaligned.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used. For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

#ifdef LOG_DEVICE
static inline bool regmap_should_log(struct regmap *map)
{
	return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
}
#else
static inline bool regmap_should_log(struct regmap *map) { return false; }
#endif


static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);
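
/*
 * Example (editorial sketch, not part of the upstream file): a driver
 * typically feeds regmap_check_range_table() through an access table in
 * its regmap_config. The device and register ranges below are
 * hypothetical.
 *
 *	static const struct regmap_range foo_rd_ranges[] = {
 *		regmap_reg_range(0x00, 0x1f),
 *		regmap_reg_range(0x40, 0x4f),
 *	};
 *
 *	static const struct regmap_access_table foo_rd_table = {
 *		.yes_ranges	= foo_rd_ranges,
 *		.n_yes_ranges	= ARRAY_SIZE(foo_rd_ranges),
 *	};
 *
 * With .rd_table = &foo_rd_table in the regmap_config, regmap_readable()
 * below ends up calling regmap_check_range_table() against these ranges.
 */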

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register_is_set && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register_is_set && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}

bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register_is_set && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->writeable_noinc_reg)
		return map->writeable_noinc_reg(map->dev, reg);

	if (map->wr_noinc_table)
		return regmap_check_range_table(map, reg, map->wr_noinc_table);

	return true;
}

bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->readable_noinc_reg)
		return map->readable_noinc_reg(map->dev, reg);

	if (map->rd_noinc_table)
		return regmap_check_range_table(map, reg, map->rd_noinc_table);

	return true;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
			return false;

	return true;
}

static void regmap_format_12_20_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[0] = reg >> 4;
	out[1] = (reg << 4) | (val >> 16);
	out[2] = val >> 8;
	out[3] = val;
}
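
/*
 * Worked example for the 12/20 format above (editorial note): with
 * reg = 0xabc and val = 0x12345, the four bytes in work_buf become
 *
 *	out[0] = 0xab		(reg bits 11..4)
 *	out[1] = 0xc1		(reg bits 3..0 | val bits 19..16)
 *	out[2] = 0x23		(val bits 15..8)
 *	out[3] = 0x45		(val bits 7..0)
 *
 * i.e. the 12-bit register and 20-bit value are packed big-endian into
 * exactly four bytes.
 */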


static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_7_17_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = val >> 8;
	out[0] = (val >> 16) | (reg << 1);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be16(val << shift, buf);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le16(val << shift, buf);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u16 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

static void regmap_format_24_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be24(val << shift, buf);
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be32(val << shift, buf);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le32(val << shift, buf);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u32 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	return get_unaligned_be16(buf);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	return get_unaligned_le16(buf);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	u16 v = get_unaligned_be16(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_16_le_inplace(void *buf)
{
	u16 v = get_unaligned_le16(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	u16 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

static unsigned int regmap_parse_24_be(const void *buf)
{
	return get_unaligned_be24(buf);
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	return get_unaligned_be32(buf);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	return get_unaligned_le32(buf);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	u32 v = get_unaligned_be32(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_32_le_inplace(void *buf)
{
	u32 v = get_unaligned_le32(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	u32 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;
	unsigned long flags = 0;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
}

static void regmap_unlock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
}

static void regmap_unlock_hwlock_irqrestore(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}

static void regmap_lock_unlock_none(void *__map)
{

}

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

static void regmap_lock_raw_spinlock(void *__map)
__acquires(&map->raw_spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	raw_spin_lock_irqsave(&map->raw_spinlock, flags);
	map->raw_spinlock_flags = flags;
}

static void regmap_unlock_raw_spinlock(void *__map)
__releases(&map->raw_spinlock)
{
	struct regmap *map = __map;
	raw_spin_unlock_irqrestore(&map->raw_spinlock, map->raw_spinlock_flags);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			rb_entry(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

static int regmap_set_name(struct regmap *map, const struct regmap_config *config)
{
	if (config->name) {
		const char *name = kstrdup_const(config->name, GFP_KERNEL);

		if (!name)
			return -ENOMEM;

		kfree_const(map->name);
		map->name = name;
	}

	return 0;
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;
	int ret;

	map->dev = dev;

	ret = regmap_set_name(map, config);
	if (ret)
		return ret;

	regmap_debugfs_exit(map);
	regmap_debugfs_init(map);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static int dev_get_regmap_match(struct device *dev, void *res, void *data);

static int regmap_detach_dev(struct device *dev, struct regmap *map)
{
	if (!dev)
		return 0;

	return devres_release(dev, dev_get_regmap_release,
			      dev_get_regmap_match, (void *)map->name);
}

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
						const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}

enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the firmware node exists, try to get the endianness from it */
	if (fwnode_property_read_bool(fwnode, "big-endian"))
		endian = REGMAP_ENDIAN_BIG;
	else if (fwnode_property_read_bool(fwnode, "little-endian"))
		endian = REGMAP_ENDIAN_LITTLE;
	else if (fwnode_property_read_bool(fwnode, "native-endian"))
		endian = REGMAP_ENDIAN_NATIVE;

	/* If the endianness was specified in the fwnode, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
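
/*
 * Example (editorial sketch): the fwnode lookup above corresponds to
 * devicetree properties such as the following, on a hypothetical node:
 *
 *	codec@1a {
 *		compatible = "vendor,foo-codec";
 *		reg = <0x1a>;
 *		big-endian;
 *	};
 *
 * With "big-endian" present and no override in the regmap_config, the
 * value format resolves to REGMAP_ENDIAN_BIG.
 */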

struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	ret = regmap_set_name(map, config);
	if (ret)
		goto err_map;

	ret = -EINVAL;	/* Later error paths rely on this */

	if (config->disable_locking) {
		map->lock = map->unlock = regmap_lock_unlock_none;
		map->can_sleep = config->can_sleep;
		regmap_debugfs_disable(map);
	} else if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
		map->can_sleep = config->can_sleep;
	} else if (config->use_hwlock) {
		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
		if (!map->hwlock) {
			ret = -ENXIO;
			goto err_name;
		}

		switch (config->hwlock_mode) {
		case HWLOCK_IRQSTATE:
			map->lock = regmap_lock_hwlock_irqsave;
			map->unlock = regmap_unlock_hwlock_irqrestore;
			break;
		case HWLOCK_IRQ:
			map->lock = regmap_lock_hwlock_irq;
			map->unlock = regmap_unlock_hwlock_irq;
			break;
		default:
			map->lock = regmap_lock_hwlock;
			map->unlock = regmap_unlock_hwlock;
			break;
		}

		map->lock_arg = map;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			if (config->use_raw_spinlock) {
				raw_spin_lock_init(&map->raw_spinlock);
				map->lock = regmap_lock_raw_spinlock;
				map->unlock = regmap_unlock_raw_spinlock;
				lockdep_set_class_and_name(&map->raw_spinlock,
							   lock_key, lock_name);
			} else {
				spin_lock_init(&map->spinlock);
				map->lock = regmap_lock_spinlock;
				map->unlock = regmap_unlock_spinlock;
				lockdep_set_class_and_name(&map->spinlock,
							   lock_key, lock_name);
			}
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			map->can_sleep = true;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
		map->lock_key = lock_key;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->reg_base = config->reg_base;
	map->reg_shift = config->pad_bits % 8;

	map->format.pad_bytes = config->pad_bits / 8;
	map->format.reg_shift = config->reg_shift;
	map->format.reg_bytes = BITS_TO_BYTES(config->reg_bits);
	map->format.val_bytes = BITS_TO_BYTES(config->val_bits);
	map->format.buf_size = BITS_TO_BYTES(config->reg_bits + config->val_bits + config->pad_bits);
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_read || !(config->read || (bus && bus->read));
	map->use_single_write = config->use_single_write || !(config->write || (bus && bus->write));
	map->can_multi_write = config->can_multi_write && (config->write || (bus && bus->write));
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	} else if (config->max_raw_read && config->max_raw_write) {
		map->max_raw_read = config->max_raw_read;
		map->max_raw_write = config->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->max_register_is_set = map->max_register ?: config->max_register_is_0;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->wr_noinc_table = config->wr_noinc_table;
	map->rd_noinc_table = config->rd_noinc_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask ||
	    config->write_flag_mask ||
	    config->zero_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	if (config->read && config->write) {
		map->reg_read = _regmap_bus_read;
		if (config->reg_update_bits)
			map->reg_update_bits = config->reg_update_bits;

		/* Bulk read/write */
		map->read = config->read;
		map->write = config->write;

		reg_endian = REGMAP_ENDIAN_NATIVE;
		val_endian = REGMAP_ENDIAN_NATIVE;
	} else if (!bus) {
		map->reg_read = config->reg_read;
		map->reg_write = config->reg_write;
		map->reg_update_bits = config->reg_update_bits;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;
		map->reg_update_bits = bus->reg_update_bits;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
		/* Bulk read/write */
		map->read = bus->read;
		map->write = bus->write;

		reg_endian = regmap_get_reg_endian(bus, config);
		val_endian = regmap_get_val_endian(dev, bus, config);
	}

	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		case 17:
			map->format.format_write = regmap_format_7_17_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 12:
		switch (config->val_bits) {
		case 20:
			map->format.format_write = regmap_format_12_20_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 24:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_24_be;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	default:
		goto err_hwlock;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 24:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_24_be;
			map->format.parse_val = regmap_parse_24_be;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_hwlock;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_hwlock;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_hwlock;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %u < %u\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %u > %u\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/* Make sure that this register range has no selector
		   or data window within its boundary */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned int sel_reg = config->ranges[j].selector_reg;
			unsigned int win_min = config->ranges[j].window_start;
			unsigned int win_max = win_min +
					       config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	} else {
		regmap_debugfs_init(map);
	}

	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_hwlock:
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
err_name:
	kfree_const(map->name);
err_map:
	kfree(map);
err:
	if (bus && bus->free_on_exit)
		kfree(bus);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);
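
/*
 * Example (editorial sketch, hypothetical driver): most users reach
 * __regmap_init() through a bus-specific wrapper such as
 * devm_regmap_init_i2c(), passing a regmap_config like this:
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits	= 8,
 *		.val_bits	= 8,
 *		.max_register	= 0x7f,
 *		.cache_type	= REGCACHE_MAPLE,
 *	};
 *
 *	map = devm_regmap_init_i2c(i2c, &foo_regmap_config);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 * The wrapper supplies the regmap_bus and bus_context, and the devres
 * variant below ties the map's lifetime to the device.
 */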

static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);

static void regmap_field_init(struct regmap_field *rm_field,
	struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);

	WARN_ONCE(rm_field->mask == 0, "invalid empty mask defined\n");

	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
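
/*
 * Example (editorial sketch): a reg_field is usually described with the
 * REG_FIELD() macro; the register layout here is hypothetical.
 *
 *	static const struct reg_field foo_en_field = REG_FIELD(0x30, 3, 5);
 *
 *	field = devm_regmap_field_alloc(dev, map, foo_en_field);
 *	if (IS_ERR(field))
 *		return PTR_ERR(field);
 *	regmap_field_write(field, 0x4);
 *
 * regmap_field_write() then only touches bits 5..3 of register 0x30,
 * using the mask GENMASK(5, 3) computed in regmap_field_init().
 */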


/**
 * regmap_field_bulk_alloc() - Allocate and initialise multiple register fields.
 *
 * @regmap: regmap bank in which this register field is located.
 * @rm_field: regmap register fields within the bank.
 * @reg_field: Register fields within the bank.
 * @num_fields: Number of register fields.
 *
 * The return value will be -ENOMEM on error or zero for success.
 * Newly allocated regmap_fields should be freed by calling
 * regmap_field_bulk_free().
 */
int regmap_field_bulk_alloc(struct regmap *regmap,
			    struct regmap_field **rm_field,
			    const struct reg_field *reg_field,
			    int num_fields)
{
	struct regmap_field *rf;
	int i;

	rf = kcalloc(num_fields, sizeof(*rf), GFP_KERNEL);
	if (!rf)
		return -ENOMEM;

	for (i = 0; i < num_fields; i++) {
		regmap_field_init(&rf[i], regmap, reg_field[i]);
		rm_field[i] = &rf[i];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc);

/**
 * devm_regmap_field_bulk_alloc() - Allocate and initialise multiple register
 * fields.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @rm_field: regmap register fields within the bank.
 * @reg_field: Register fields within the bank.
 * @num_fields: Number of register fields.
 *
 * The return value will be -ENOMEM on error or zero for success.
 * Newly allocated regmap_fields will be automatically freed by the
 * device management code.
 */
int devm_regmap_field_bulk_alloc(struct device *dev,
				 struct regmap *regmap,
				 struct regmap_field **rm_field,
				 const struct reg_field *reg_field,
				 int num_fields)
{
	struct regmap_field *rf;
	int i;

	rf = devm_kcalloc(dev, num_fields, sizeof(*rf), GFP_KERNEL);
	if (!rf)
		return -ENOMEM;

	for (i = 0; i < num_fields; i++) {
		regmap_field_init(&rf[i], regmap, reg_field[i]);
		rm_field[i] = &rf[i];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_alloc);

/**
 * regmap_field_bulk_free() - Free register fields allocated using
 * regmap_field_bulk_alloc().
 *
 * @field: regmap fields which should be freed.
 */
void regmap_field_bulk_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_free);

/**
 * devm_regmap_field_bulk_free() - Free bulk register fields allocated using
 * devm_regmap_field_bulk_alloc().
 *
 * @dev: Device that will be interacted with
 * @field: regmap fields which should be freed.
 *
 * Free register fields allocated using devm_regmap_field_bulk_alloc().
 * Usually drivers need not call this function, as the memory allocated
 * via devm will be freed as per the device-driver life-cycle.
 */
void devm_regmap_field_bulk_free(struct device *dev,
				 struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_free);

/**
 * devm_regmap_field_free() - Free a register field allocated using
 * devm_regmap_field_alloc().
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free a register field allocated using devm_regmap_field_alloc().
 * Usually drivers need not call this function, as the memory allocated
 * via devm will be freed as per the device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field should be freed by the
 * user once it's finished working with it, using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
		struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free a register field allocated using
 * regmap_field_alloc().
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration. Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache. This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	int ret;

	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->max_register_is_set = map->max_register ?: config->max_register_is_0;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	ret = regmap_set_name(map, config);
	if (ret)
		return ret;

	regmap_debugfs_init(map);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);
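
/*
 * Example (editorial sketch): a driver that discovers the device
 * revision at probe time might switch to a larger register map, e.g.
 *
 *	if (rev >= 2)
 *		ret = regmap_reinit_cache(map, &foo_regmap_v2_config);
 *
 * where foo_regmap_v2_config is a hypothetical second configuration;
 * per the note above, the caller must ensure no concurrent regmap I/O.
 */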

/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regmap_detach_dev(map->dev, map);
	regcache_exit(map);

	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
	if (map->lock == regmap_lock_mutex)
		mutex_destroy(&map->mutex);
	kfree_const(map->name);
	kfree(map->patch);
	if (map->bus && map->bus->free_on_exit)
		kfree(map->bus);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;
	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return (*r)->name && !strcmp((*r)->name, data);
	else
		return 1;
}

/**
 * dev_get_regmap() - Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL. If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used. Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);
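
/*
 * Example (editorial sketch): a child driver (for instance an MFD cell)
 * commonly borrows its parent's regmap:
 *
 *	map = dev_get_regmap(pdev->dev.parent, NULL);
 *	if (!map)
 *		return -ENODEV;
 *
 * Passing NULL for the name picks the first (usually only) regmap
 * registered for that device.
 */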

/**
 * regmap_get_device() - Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have the selector register inside the data
	   window. In that case the selector register is present on every
	   page and needs no page switching when accessed alone. */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}
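
/*
 * Worked example for the paging arithmetic above (editorial note):
 * with a hypothetical range of range_min = 0x100, window_start = 0x10
 * and window_len = 0x20, an access to *reg = 0x145 gives
 *
 *	win_offset = (0x145 - 0x100) % 0x20 = 0x05
 *	win_page   = (0x145 - 0x100) / 0x20 = 2
 *
 * so page 2 is written to the selector register and the access is
 * redirected to 0x10 + 0x05 = 0x15 inside the data window.
 */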

static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
					  unsigned long mask)
{
	u8 *buf;
	int i;

	if (!mask || !map->work_buf)
		return;

	buf = map->work_buf;

	for (i = 0; i < max_bytes; i++)
		buf[i] |= (mask >> (8 * i)) & 0xff;
}

static unsigned int regmap_reg_addr(struct regmap *map, unsigned int reg)
{
	reg += map->reg_base;

	if (map->format.reg_shift > 0)
		reg >>= map->format.reg_shift;
	else if (map->format.reg_shift < 0)
		reg <<= -(map->format.reg_shift);

	return reg;
}
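
/*
 * Worked example (editorial note): with a hypothetical reg_base = 0x100
 * and format.reg_shift = 2, a caller-visible register 0x10 becomes
 *
 *	(0x10 + 0x100) >> 2 = 0x44
 *
 * on the bus; a negative reg_shift shifts left instead, which suits
 * devices whose bus addresses are byte-based while the logical register
 * numbering is word-based.
 */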

static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
				  const void *val, size_t val_len, bool noinc)
{
	struct regmap_range_node *range;
	unsigned long flags;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	/* Check for unwritable or noinc registers in range
	 * before we start
	 */
	if (!regmap_writeable_noinc(map, reg)) {
		for (i = 0; i < val_len / map->format.val_bytes; i++) {
			unsigned int element =
				reg + regmap_get_offset(map, i);
			if (!regmap_writeable(map, element) ||
			    regmap_writeable_noinc(map, element))
				return -EINVAL;
		}
	}

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival, offset;
		int val_bytes = map->format.val_bytes;

		/* Cache the last written value for noinc writes */
		i = noinc ? val_len - val_bytes : 0;
		for (; i < val_len; i += val_bytes) {
			ival = map->format.parse_val(val + i);
			offset = noinc ? 0 : regmap_get_offset(map, i / val_bytes);
			ret = regcache_write(map, reg + offset, ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + offset, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write_impl(map, reg, val,
						     win_residue *
						     map->format.val_bytes, noinc);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
		if (ret != 0)
			return ret;
	}

	reg = regmap_reg_addr(map, reg);
	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write. Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->write(map->bus_context, map->work_buf,
				 map->format.reg_bytes +
				 map->format.pad_bytes +
				 val_len);
	else if (map->bus && map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);
	else
		ret = -ENOTSUPP;

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes lock that we already have,
		 * thus call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->write && map->format.format_val && map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	reg = regmap_reg_addr(map, reg);
	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->write(map->bus_context, map->work_buf, map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;
	struct regmap_range_node *range;
	int ret;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	reg = regmap_reg_addr(map, reg);
	return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write_impl(map, reg,
				      map->work_buf +
				      map->format.reg_bytes +
				      map->format.pad_bytes,
				      map->format.val_bytes,
				      false);
}

static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus || (!map->bus && map->read)) ? map : map->bus_context;
}

int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	ret = map->reg_write(context, reg, val);
	if (ret == 0) {
		if (regmap_should_log(map))
			dev_info(map->dev, "%x <= %x\n", reg, val);

		trace_regmap_reg_write(map, reg, val);
	}

	return ret;
}

/**
 * regmap_write() - Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);
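
/*
 * Example (editorial sketch): typical single-register usage, with a
 * hypothetical register and map:
 *
 *	ret = regmap_write(map, 0x12, 0x34);
 *	if (ret)
 *		dev_err(dev, "write failed: %d\n", ret);
 *
 * The value lands in the cache (unless bypassed) and, when the cache is
 * not in cache-only mode, on the hardware via map->reg_write().
 */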
1973
1974
/**
1975
* regmap_write_async() - Write a value to a single register asynchronously
1976
*
1977
* @map: Register map to write to
1978
* @reg: Register to write to
1979
* @val: Value to be written
1980
*
1981
* A value of zero will be returned on success, a negative errno will
1982
* be returned in error cases.
1983
*/
1984
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
1985
{
1986
int ret;
1987
1988
if (!IS_ALIGNED(reg, map->reg_stride))
1989
return -EINVAL;
1990
1991
map->lock(map->lock_arg);
1992
1993
map->async = true;
1994
1995
ret = _regmap_write(map, reg, val);
1996
1997
map->async = false;
1998
1999
map->unlock(map->lock_arg);
2000
2001
return ret;
2002
}
2003
EXPORT_SYMBOL_GPL(regmap_write_async);
2004
2005
int _regmap_raw_write(struct regmap *map, unsigned int reg,
2006
const void *val, size_t val_len, bool noinc)
2007
{
2008
size_t val_bytes = map->format.val_bytes;
2009
size_t val_count = val_len / val_bytes;
2010
size_t chunk_count, chunk_bytes;
2011
size_t chunk_regs = val_count;
2012
int ret, i;
2013
2014
if (!val_count)
2015
return -EINVAL;
2016
2017
if (map->use_single_write)
2018
chunk_regs = 1;
2019
else if (map->max_raw_write && val_len > map->max_raw_write)
2020
chunk_regs = map->max_raw_write / val_bytes;
2021
2022
chunk_count = val_count / chunk_regs;
2023
chunk_bytes = chunk_regs * val_bytes;
2024
2025
/* Write as many bytes as possible with chunk_size */
2026
for (i = 0; i < chunk_count; i++) {
2027
ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
2028
if (ret)
2029
return ret;
2030
2031
reg += regmap_get_offset(map, chunk_regs);
2032
val += chunk_bytes;
2033
val_len -= chunk_bytes;
2034
}
2035
2036
/* Write remaining bytes */
2037
if (val_len)
2038
ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);
2039
2040
return ret;
2041
}
2042
2043
/**
2044
* regmap_raw_write() - Write raw values to one or more registers
2045
*
2046
* @map: Register map to write to
2047
* @reg: Initial register to write to
2048
* @val: Block of data to be written, laid out for direct transmission to the
2049
* device
2050
* @val_len: Length of data pointed to by val.
2051
*
2052
* This function is intended to be used for things like firmware
2053
* download where a large block of data needs to be transferred to the
2054
* device. No formatting will be done on the data provided.
2055
*
2056
* A value of zero will be returned on success, a negative errno will
2057
* be returned in error cases.
2058
*/
2059
int regmap_raw_write(struct regmap *map, unsigned int reg,
2060
const void *val, size_t val_len)
2061
{
2062
int ret;
2063
2064
if (!regmap_can_raw_write(map))
2065
return -EINVAL;
2066
if (val_len % map->format.val_bytes)
2067
return -EINVAL;
2068
2069
map->lock(map->lock_arg);
2070
2071
ret = _regmap_raw_write(map, reg, val, val_len, false);
2072
2073
map->unlock(map->lock_arg);
2074
2075
return ret;
2076
}
2077
EXPORT_SYMBOL_GPL(regmap_raw_write);
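
/*
 * Illustrative sketch (not part of this file's logic): downloading a
 * firmware blob with regmap_raw_write(). The buffer must already be in
 * the device's wire format; "priv", "FW_LOAD_REG", "fw_data" and
 * "fw_len" are hypothetical, and fw_len must be a multiple of the
 * value size.
 *
 *	ret = regmap_raw_write(priv->regmap, FW_LOAD_REG, fw_data, fw_len);
 */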

static int regmap_noinc_readwrite(struct regmap *map, unsigned int reg,
				  void *val, unsigned int val_len, bool write)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int lastval;
	u8 *u8p;
	u16 *u16p;
	u32 *u32p;
	int ret;
	int i;

	switch (val_bytes) {
	case 1:
		u8p = val;
		if (write)
			lastval = (unsigned int)u8p[val_count - 1];
		break;
	case 2:
		u16p = val;
		if (write)
			lastval = (unsigned int)u16p[val_count - 1];
		break;
	case 4:
		u32p = val;
		if (write)
			lastval = (unsigned int)u32p[val_count - 1];
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Update the cache with the last value we write; the rest has just
	 * gone down into the hardware FIFO. We can't cache FIFOs. This makes
	 * sure a single read from the cache will work.
	 */
	if (write) {
		if (!map->cache_bypass && !map->defer_caching) {
			ret = regcache_write(map, reg, lastval);
			if (ret != 0)
				return ret;
			if (map->cache_only) {
				map->cache_dirty = true;
				return 0;
			}
		}
		ret = map->bus->reg_noinc_write(map->bus_context, reg, val, val_count);
	} else {
		ret = map->bus->reg_noinc_read(map->bus_context, reg, val, val_count);
	}

	if (!ret && regmap_should_log(map)) {
		dev_info(map->dev, "%x %s [", reg, write ? "<=" : "=>");
		for (i = 0; i < val_count; i++) {
			switch (val_bytes) {
			case 1:
				pr_cont("%x", u8p[i]);
				break;
			case 2:
				pr_cont("%x", u16p[i]);
				break;
			case 4:
				pr_cont("%x", u32p[i]);
				break;
			default:
				break;
			}
			if (i == (val_count - 1))
				pr_cont("]\n");
			else
				pr_cont(",");
		}
	}

	return 0;
}

/**
 * regmap_noinc_write(): Write data to a register without incrementing the
 * register number
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Pointer to data buffer
 * @val_len: Length of the data buffer in bytes.
 *
 * The regmap API usually assumes that bulk bus write operations will write a
 * range of registers. Some devices have certain registers for which a write
 * operation can write to an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple writes as required to write val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_write(struct regmap *map, unsigned int reg,
		       const void *val, size_t val_len)
{
	size_t write_len;
	int ret;

	if (!map->write && !(map->bus && map->bus->reg_noinc_write))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Use the accelerated operation if we can. The val drops the const
	 * typing in order to facilitate code reuse in regmap_noinc_readwrite().
	 */
	if (map->bus->reg_noinc_write) {
		ret = regmap_noinc_readwrite(map, reg, (void *)val, val_len, true);
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_write && map->max_raw_write < val_len)
			write_len = map->max_raw_write;
		else
			write_len = val_len;
		ret = _regmap_raw_write(map, reg, val, write_len, true);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + write_len;
		val_len -= write_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_write);
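
/*
 * Illustrative sketch (not part of this file's logic): pushing a buffer
 * into a device FIFO register with regmap_noinc_write(). "priv" and
 * "FIFO_REG" are hypothetical; the register must be volatile and
 * accepted by the map's writeable_noinc configuration.
 *
 *	ret = regmap_noinc_write(priv->regmap, FIFO_REG, buf, len);
 */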

/**
 * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
 *                                   register field.
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to write asynchronously
 * @force: Boolean indicating whether to force the write
 *
 * Perform a read/modify/write cycle on the register field with change,
 * async, force options.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, field->reg,
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
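
/*
 * Illustrative sketch (not part of this file's logic): allocating a
 * field with devm_regmap_field_alloc() and writing it via the
 * regmap_field_write() helper, which wraps this function. The
 * REG_FIELD() parameters and "priv" are hypothetical.
 *
 *	struct reg_field enable = REG_FIELD(0x08, 0, 0);
 *	struct regmap_field *f;
 *
 *	f = devm_regmap_field_alloc(priv->dev, priv->regmap, enable);
 *	if (IS_ERR(f))
 *		return PTR_ERR(f);
 *	ret = regmap_field_write(f, 1);
 */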

/**
 * regmap_field_test_bits() - Check if all specified bits are set in a
 *                            register field.
 *
 * @field: Register field to operate on
 * @bits: Bits to test
 *
 * Returns negative errno if the underlying regmap_field_read() fails,
 * 0 if at least one of the tested bits is not set and 1 if all tested
 * bits are set.
 */
int regmap_field_test_bits(struct regmap_field *field, unsigned int bits)
{
	unsigned int val;
	int ret;

	ret = regmap_field_read(field, &val);
	if (ret)
		return ret;

	return (val & bits) == bits;
}
EXPORT_SYMBOL_GPL(regmap_field_test_bits);

/**
 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
 *                                    register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to write asynchronously
 * @force: Boolean indicating whether to force the write
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				   unsigned int mask, unsigned int val,
				   bool *change, bool async, bool force)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap,
				       field->reg + (field->id_offset * id),
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);

/**
 * regmap_bulk_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in a single transfer or in multiple transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write, for them we have a series of
	 * single write operations.
	 */
	if (!map->write || !map->format.parse_inplace) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		wval = kmemdup_array(val, val_count, val_bytes, map->alloc_flags);
		if (!wval)
			return -ENOMEM;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);

		kfree(wval);
	}

	if (!ret)
		trace_regmap_bulk_write(map, reg, val, val_bytes * val_count);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
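
/*
 * Illustrative sketch (not part of this file's logic): writing four
 * consecutive registers from native-endian values; "priv" and the
 * start address are hypothetical. For a 16-bit map the values are
 * passed as u16 in CPU byte order.
 *
 *	u16 coeffs[4] = { 0x0001, 0x0002, 0x0003, 0x0004 };
 *
 *	ret = regmap_bulk_write(priv->regmap, 0x20, coeffs,
 *				ARRAY_SIZE(coeffs));
 */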

/*
 * _regmap_raw_multi_reg_write()
 *
 * The (register, newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative. The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		trace_regmap_hw_write_start(map, reg, 1);
		reg = regmap_reg_addr(map, reg);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;
		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * The set of registers is not necessarily in order, but since
	 * the order of writes must be preserved this algorithm chops
	 * the set each time the page changes. This also applies if
	 * there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n=0.
			 * This can't occur with page breaks as we
			 * never write on the first iteration.
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us) {
				if (map->can_sleep)
					fsleep(regs[i].delay_us);
				else
					udelay(regs[i].delay_us);
			}

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}

		}

	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us) {
				if (map->can_sleep)
					fsleep(regs[i].delay_us);
				else
					udelay(regs[i].delay_us);
			}
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence) * num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}

/**
 * regmap_multi_reg_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device where the set of register, value
 * pairs are supplied in any order, possibly not all in a single range.
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed. However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
 * must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
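
/*
 * Illustrative sketch (not part of this file's logic): writing a
 * scattered set of register/value pairs, with a settle delay after the
 * second write (the third struct reg_sequence member is delay_us). The
 * addresses, values and delay are hypothetical.
 *
 *	static const struct reg_sequence init_seq[] = {
 *		{ 0x02, 0x1000 },
 *		{ 0x10, 0x0003, 50 },
 *		{ 0x05, 0x00ff },
 *	};
 *
 *	ret = regmap_multi_reg_write(priv->regmap, init_seq,
 *				     ARRAY_SIZE(init_seq));
 */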
2646
2647
/**
2648
* regmap_multi_reg_write_bypassed() - Write multiple registers to the
2649
* device but not the cache
2650
*
2651
* @map: Register map to write to
2652
* @regs: Array of structures containing register,value to be written
2653
* @num_regs: Number of registers to write
2654
*
2655
* Write multiple registers to the device but not the cache where the set
2656
* of register are supplied in any order.
2657
*
2658
* This function is intended to be used for writing a large block of data
2659
* atomically to the device in single transfer for those I2C client devices
2660
* that implement this alternative block write mode.
2661
*
2662
* A value of zero will be returned on success, a negative errno will
2663
* be returned in error cases.
2664
*/
2665
int regmap_multi_reg_write_bypassed(struct regmap *map,
2666
const struct reg_sequence *regs,
2667
int num_regs)
2668
{
2669
int ret;
2670
bool bypass;
2671
2672
map->lock(map->lock_arg);
2673
2674
bypass = map->cache_bypass;
2675
map->cache_bypass = true;
2676
2677
ret = _regmap_multi_reg_write(map, regs, num_regs);
2678
2679
map->cache_bypass = bypass;
2680
2681
map->unlock(map->lock_arg);
2682
2683
return ret;
2684
}
2685
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);

/**
 * regmap_raw_write_async() - Write raw values to one or more registers
 *                            asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device. Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len, false);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);

static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len, bool noinc)
{
	struct regmap_range_node *range;
	int ret;

	if (!map->read)
		return -EINVAL;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  noinc ? 1 : val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	reg = regmap_reg_addr(map, reg);
	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->read(map->bus_context, map->work_buf,
			map->format.reg_bytes + map->format.pad_bytes,
			val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;
	struct regmap_range_node *range;
	int ret;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	reg = regmap_reg_addr(map, reg);
	return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	int ret;
	struct regmap *map = context;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;

	if (!map->format.parse_val)
		return -EINVAL;

	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
	if (ret == 0)
		*val = map->format.parse_val(work_val);

	return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
		if (regmap_should_log(map))
			dev_info(map->dev, "%x => %x\n", reg, *val);

		trace_regmap_reg_read(map, reg, *val);

		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}

/**
 * regmap_read() - Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_read(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);
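
/*
 * Illustrative sketch (not part of this file's logic): reading a status
 * register and acting on one bit; "priv", the address and the bit are
 * hypothetical.
 *
 *	unsigned int status;
 *
 *	ret = regmap_read(priv->regmap, 0x0c, &status);
 *	if (ret != 0)
 *		return ret;
 *	if (status & BIT(3))
 *		dev_warn(priv->dev, "device reported an error\n");
 */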

/**
 * regmap_read_bypassed() - Read a value from a single register directly
 *                          from the device, bypassing the cache
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;
	bool bypass, cache_only;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	cache_only = map->cache_only;
	map->cache_bypass = true;
	map->cache_only = false;

	ret = _regmap_read(map, reg, val);

	map->cache_bypass = bypass;
	map->cache_only = cache_only;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read_bypassed);

/**
 * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		size_t chunk_count, chunk_bytes;
		size_t chunk_regs = val_count;

		if (!map->cache_bypass && map->cache_only) {
			ret = -EBUSY;
			goto out;
		}

		if (!map->read) {
			ret = -ENOTSUPP;
			goto out;
		}

		if (map->use_single_read)
			chunk_regs = 1;
		else if (map->max_raw_read && val_len > map->max_raw_read)
			chunk_regs = map->max_raw_read / val_bytes;

		chunk_count = val_count / chunk_regs;
		chunk_bytes = chunk_regs * val_bytes;

		/* Read bytes that fit into whole chunks */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
			if (ret != 0)
				goto out;

			reg += regmap_get_offset(map, chunk_regs);
			val += chunk_bytes;
			val_len -= chunk_bytes;
		}

		/* Read remaining bytes */
		if (val_len) {
			ret = _regmap_raw_read(map, reg, val, val_len, false);
			if (ret != 0)
				goto out;
		}
	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);

/**
 * regmap_noinc_read(): Read data from a register without incrementing the
 * register number
 *
 * @map: Register map to read from
 * @reg: Register to read from
 * @val: Pointer to data buffer
 * @val_len: Length of output buffer in bytes.
 *
 * The regmap API usually assumes that bulk read operations will read a
 * range of registers. Some devices have certain registers for which a read
 * operation will read from an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple reads as required to read val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_read(struct regmap *map, unsigned int reg,
		      void *val, size_t val_len)
{
	size_t read_len;
	int ret;

	if (!map->read)
		return -ENOTSUPP;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * We have not defined the FIFO semantics for cache, as the
	 * cache is just one value deep. Should we return the last
	 * written value? Just avoid this by always reading the FIFO
	 * even when using cache. Cache only will not work.
	 */
	if (!map->cache_bypass && map->cache_only) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/* Use the accelerated operation if we can */
	if (map->bus->reg_noinc_read) {
		ret = regmap_noinc_readwrite(map, reg, val, val_len, false);
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_read && map->max_raw_read < val_len)
			read_len = map->max_raw_read;
		else
			read_len = val_len;
		ret = _regmap_raw_read(map, reg, val, read_len, true);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + read_len;
		val_len -= read_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_read);

/**
 * regmap_field_read(): Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
	int ret;
	unsigned int reg_val;
	ret = regmap_read(field->regmap, field->reg, &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);

/**
 * regmap_fields_read() - Read a value from a single register field with port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
		       unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	if (id >= field->id_size)
		return -EINVAL;

	ret = regmap_read(field->regmap,
			  field->reg + (field->id_offset * id),
			  &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);

static int _regmap_bulk_read(struct regmap *map, unsigned int reg,
			     const unsigned int *regs, void *val, size_t val_count)
{
	u32 *u32 = val;
	u16 *u16 = val;
	u8 *u8 = val;
	int ret, i;

	map->lock(map->lock_arg);

	for (i = 0; i < val_count; i++) {
		unsigned int ival;

		if (regs) {
			if (!IS_ALIGNED(regs[i], map->reg_stride)) {
				ret = -EINVAL;
				goto out;
			}
			ret = _regmap_read(map, regs[i], &ival);
		} else {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i), &ival);
		}
		if (ret != 0)
			goto out;

		switch (map->format.val_bytes) {
		case 4:
			u32[i] = ival;
			break;
		case 2:
			u16[i] = ival;
			break;
		case 1:
			u8[i] = ival;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
	}
out:
	map->unlock(map->lock_arg);
	return ret;
}

/**
 * regmap_bulk_read() - Read multiple sequential registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	if (map->read && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
		if (ret != 0)
			return ret;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
		ret = _regmap_bulk_read(map, reg, NULL, val, val_count);
	}
	if (!ret)
		trace_regmap_bulk_read(map, reg, val, val_bytes * val_count);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
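
/*
 * Illustrative sketch (not part of this file's logic): reading eight
 * consecutive 16-bit registers into a native-endian array; "priv" and
 * the start address are hypothetical.
 *
 *	u16 samples[8];
 *
 *	ret = regmap_bulk_read(priv->regmap, 0x40, samples,
 *			       ARRAY_SIZE(samples));
 */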

/**
 * regmap_multi_reg_read() - Read multiple non-sequential registers from the device
 *
 * @map: Register map to read from
 * @regs: Array of registers to read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_read(struct regmap *map, const unsigned int *regs, void *val,
			  size_t val_count)
{
	if (val_count == 0)
		return -EINVAL;

	return _regmap_bulk_read(map, 0, regs, val, val_count);
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_read);

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		reg = regmap_reg_addr(map, reg);
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		if (force_write || (tmp != orig) || map->force_write_field) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}

/**
 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to write asynchronously
 * @force: Boolean indicating whether to force the write
 *
 * Perform a read/modify/write cycle on a register map with change, async and
 * force options.
 *
 * If async is true:
 *
 * With most buses the read must be done synchronously so this is most useful
 * for devices with a cache which do not need to interact with the hardware to
 * determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int ret;

	map->lock(map->lock_arg);

	map->async = async;

	ret = _regmap_update_bits(map, reg, mask, val, change, force);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);
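
/*
 * Illustrative sketch (not part of this file's logic): drivers normally
 * call the regmap_update_bits(), regmap_set_bits() and
 * regmap_clear_bits() helpers built on this function. The register,
 * mask and value here are hypothetical: set bits [3:2] to 0b10 without
 * touching the rest of the register.
 *
 *	ret = regmap_update_bits(priv->regmap, 0x08, GENMASK(3, 2), 0x2 << 2);
 */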

/**
 * regmap_test_bits() - Check if all specified bits are set in a register.
 *
 * @map: Register map to operate on
 * @reg: Register to read from
 * @bits: Bits to test
 *
 * Returns 0 if at least one of the tested bits is not set, 1 if all tested
 * bits are set and a negative error number if the underlying regmap_read()
 * fails.
 */
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
{
	unsigned int val;
	int ret;

	ret = regmap_read(map, reg, &val);
	if (ret)
		return ret;

	return (val & bits) == bits;
}
EXPORT_SYMBOL_GPL(regmap_test_bits);
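
/*
 * Illustrative sketch (not part of this file's logic): note the
 * three-way return convention, so the result must be checked
 * explicitly rather than treated as boolean. The register and bits
 * are hypothetical.
 *
 *	ret = regmap_test_bits(priv->regmap, 0x0c, BIT(0) | BIT(1));
 *	if (ret < 0)
 *		return ret;		(the underlying read failed)
 *	if (ret == 1)
 *		...			(both bits are set)
 */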

void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}

/**
 * regmap_async_complete - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	wait_event(map->async_waitq, regmap_async_is_done(map));

	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);

/**
 * regmap_register_patch - Register and apply register updates to be applied
 *                         on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply
 * corrections to the device defaults on startup, such as the updates
 * some vendors provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
		      num_regs))
		return 0;

	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);
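
/*
 * Illustrative sketch (not part of this file's logic): registering a
 * vendor errata patch at probe time; the addresses and values are
 * hypothetical. The patch is applied immediately and re-applied
 * whenever the cache is synchronised.
 *
 *	static const struct reg_sequence errata[] = {
 *		{ 0x7f, 0x0001 },
 *		{ 0x41, 0x0330 },
 *	};
 *
 *	ret = regmap_register_patch(priv->regmap, errata,
 *				    ARRAY_SIZE(errata));
 */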

/**
 * regmap_get_val_bytes() - Report the size of a register value
 *
 * @map: Register map to operate on.
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

/**
 * regmap_get_max_register() - Report the max register value
 *
 * @map: Register map to operate on.
 *
 * Report the max register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
	return map->max_register_is_set ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);

/**
 * regmap_get_reg_stride() - Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);

/**
 * regmap_might_sleep() - Returns whether a regmap access might sleep.
 *
 * @map: Register map to operate on.
 *
 * Returns true if an access to the register might sleep, else false.
 */
bool regmap_might_sleep(struct regmap *map)
{
	return map->can_sleep;
}
EXPORT_SYMBOL_GPL(regmap_might_sleep);

int regmap_parse_val(struct regmap *map, const void *buf,
		     unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);