GitHub repository: torvalds/linux
Path: drivers/base/regmap/regmap.c
// SPDX-License-Identifier: GPL-2.0
//
// Register map access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <[email protected]>

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/property.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>
#include <linux/unaligned.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used. For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

#ifdef LOG_DEVICE
static inline bool regmap_should_log(struct regmap *map)
{
	return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
}
#else
static inline bool regmap_should_log(struct regmap *map) { return false; }
#endif
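/*
 * Example (editor's note, not in the original source): to get the extra
 * printk logging described above for one device, replace the #undef with
 * a definition naming that device before rebuilding, e.g.:
 *
 *	#define LOG_DEVICE "1-0030"
 *
 * where "1-0030" is a hypothetical dev_name() for the device of interest.
 */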


static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);
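/*
 * Example (editor's sketch, not in the original source): drivers usually
 * feed these helpers through a regmap_access_table in their regmap_config.
 * The register addresses below are hypothetical:
 *
 *	static const struct regmap_range foo_rd_ranges[] = {
 *		regmap_reg_range(0x00, 0x0f),
 *		regmap_reg_range(0x20, 0x2f),
 *	};
 *
 *	static const struct regmap_access_table foo_rd_table = {
 *		.yes_ranges   = foo_rd_ranges,
 *		.n_yes_ranges = ARRAY_SIZE(foo_rd_ranges),
 *	};
 *
 * Assigning &foo_rd_table to regmap_config.rd_table makes
 * regmap_readable() accept only registers 0x00-0x0f and 0x20-0x2f.
 */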

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register_is_set && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register_is_set && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}

bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register_is_set && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}
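/*
 * Example (editor's sketch, not in the original source): drivers usually
 * drive this predicate through the volatile_reg callback or the
 * volatile_table in their regmap_config. A status register that must
 * always hit the hardware could be marked like this, where
 * FOO_REG_STATUS is a hypothetical register define:
 *
 *	static bool foo_volatile_reg(struct device *dev, unsigned int reg)
 *	{
 *		return reg == FOO_REG_STATUS;
 *	}
 *
 * With .volatile_reg = foo_volatile_reg and a cache enabled, reads of
 * FOO_REG_STATUS bypass the regcache while other registers stay cached.
 */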

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->writeable_noinc_reg)
		return map->writeable_noinc_reg(map->dev, reg);

	if (map->wr_noinc_table)
		return regmap_check_range_table(map, reg, map->wr_noinc_table);

	return true;
}

bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->readable_noinc_reg)
		return map->readable_noinc_reg(map->dev, reg);

	if (map->rd_noinc_table)
		return regmap_check_range_table(map, reg, map->rd_noinc_table);

	return true;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
			return false;

	return true;
}

static void regmap_format_12_20_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[0] = reg >> 4;
	out[1] = (reg << 4) | (val >> 16);
	out[2] = val >> 8;
	out[3] = val;
}


static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_7_17_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = val >> 8;
	out[0] = (val >> 16) | (reg << 1);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be16(val << shift, buf);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le16(val << shift, buf);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u16 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

static void regmap_format_24_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be24(val << shift, buf);
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be32(val << shift, buf);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le32(val << shift, buf);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u32 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	return get_unaligned_be16(buf);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	return get_unaligned_le16(buf);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	u16 v = get_unaligned_be16(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_16_le_inplace(void *buf)
{
	u16 v = get_unaligned_le16(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	u16 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

static unsigned int regmap_parse_24_be(const void *buf)
{
	return get_unaligned_be24(buf);
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	return get_unaligned_be32(buf);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	return get_unaligned_le32(buf);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	u32 v = get_unaligned_be32(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_32_le_inplace(void *buf)
{
	u32 v = get_unaligned_le32(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	u32 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &map->spinlock_flags);
}

static void regmap_unlock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
}

static void regmap_unlock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
}

static void regmap_unlock_hwlock_irqrestore(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}

static void regmap_lock_unlock_none(void *__map)
{

}

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

static void regmap_lock_raw_spinlock(void *__map)
__acquires(&map->raw_spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	raw_spin_lock_irqsave(&map->raw_spinlock, flags);
	map->raw_spinlock_flags = flags;
}

static void regmap_unlock_raw_spinlock(void *__map)
__releases(&map->raw_spinlock)
{
	struct regmap *map = __map;
	raw_spin_unlock_irqrestore(&map->raw_spinlock, map->raw_spinlock_flags);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			rb_entry(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

static int regmap_set_name(struct regmap *map, const struct regmap_config *config)
{
	if (config->name) {
		const char *name = kstrdup_const(config->name, GFP_KERNEL);

		if (!name)
			return -ENOMEM;

		kfree_const(map->name);
		map->name = name;
	}

	return 0;
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;
	int ret;

	map->dev = dev;

	ret = regmap_set_name(map, config);
	if (ret)
		return ret;

	regmap_debugfs_exit(map);
	regmap_debugfs_init(map);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static int dev_get_regmap_match(struct device *dev, void *res, void *data);

static int regmap_detach_dev(struct device *dev, struct regmap *map)
{
	if (!dev)
		return 0;

	return devres_release(dev, dev_get_regmap_release,
			      dev_get_regmap_match, (void *)map->name);
}

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
						const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}

enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the firmware node exists, try to get the endianness from it */
	if (fwnode_property_read_bool(fwnode, "big-endian"))
		endian = REGMAP_ENDIAN_BIG;
	else if (fwnode_property_read_bool(fwnode, "little-endian"))
		endian = REGMAP_ENDIAN_LITTLE;
	else if (fwnode_property_read_bool(fwnode, "native-endian"))
		endian = REGMAP_ENDIAN_NATIVE;

	/* If the endianness was specified in fwnode, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
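/*
 * Example (editor's note, not in the original source): the fwnode lookup
 * above means a firmware description of the device can override the value
 * endianness, e.g. a hypothetical devicetree node:
 *
 *	codec@1a {
 *		compatible = "acme,foo-codec";
 *		reg = <0x1a>;
 *		little-endian;
 *	};
 *
 * would make regmap_get_val_endian() return REGMAP_ENDIAN_LITTLE
 * regardless of the bus default.
 */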

struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	ret = regmap_set_name(map, config);
	if (ret)
		goto err_map;

	ret = -EINVAL; /* Later error paths rely on this */

	if (config->disable_locking) {
		map->lock = map->unlock = regmap_lock_unlock_none;
		map->can_sleep = config->can_sleep;
		regmap_debugfs_disable(map);
	} else if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
		map->can_sleep = config->can_sleep;
	} else if (config->use_hwlock) {
		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
		if (!map->hwlock) {
			ret = -ENXIO;
			goto err_name;
		}

		switch (config->hwlock_mode) {
		case HWLOCK_IRQSTATE:
			map->lock = regmap_lock_hwlock_irqsave;
			map->unlock = regmap_unlock_hwlock_irqrestore;
			break;
		case HWLOCK_IRQ:
			map->lock = regmap_lock_hwlock_irq;
			map->unlock = regmap_unlock_hwlock_irq;
			break;
		default:
			map->lock = regmap_lock_hwlock;
			map->unlock = regmap_unlock_hwlock;
			break;
		}

		map->lock_arg = map;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			if (config->use_raw_spinlock) {
				raw_spin_lock_init(&map->raw_spinlock);
				map->lock = regmap_lock_raw_spinlock;
				map->unlock = regmap_unlock_raw_spinlock;
				lockdep_set_class_and_name(&map->raw_spinlock,
							   lock_key, lock_name);
			} else {
				spin_lock_init(&map->spinlock);
				map->lock = regmap_lock_spinlock;
				map->unlock = regmap_unlock_spinlock;
				lockdep_set_class_and_name(&map->spinlock,
							   lock_key, lock_name);
			}
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			map->can_sleep = true;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
		map->lock_key = lock_key;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->reg_base = config->reg_base;
	map->reg_shift = config->pad_bits % 8;

	map->format.pad_bytes = config->pad_bits / 8;
	map->format.reg_shift = config->reg_shift;
	map->format.reg_bytes = BITS_TO_BYTES(config->reg_bits);
	map->format.val_bytes = BITS_TO_BYTES(config->val_bits);
	map->format.buf_size = BITS_TO_BYTES(config->reg_bits + config->val_bits + config->pad_bits);
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_read || !(config->read || (bus && bus->read));
	map->use_single_write = config->use_single_write || !(config->write || (bus && bus->write));
	map->can_multi_write = config->can_multi_write && (config->write || (bus && bus->write));
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	} else if (config->max_raw_read && config->max_raw_write) {
		map->max_raw_read = config->max_raw_read;
		map->max_raw_write = config->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->max_register_is_set = map->max_register ?: config->max_register_is_0;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->wr_noinc_table = config->wr_noinc_table;
	map->rd_noinc_table = config->rd_noinc_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask ||
	    config->write_flag_mask ||
	    config->zero_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	if (config && config->read && config->write) {
		map->reg_read = _regmap_bus_read;
		if (config->reg_update_bits)
			map->reg_update_bits = config->reg_update_bits;

		/* Bulk read/write */
		map->read = config->read;
		map->write = config->write;

		reg_endian = REGMAP_ENDIAN_NATIVE;
		val_endian = REGMAP_ENDIAN_NATIVE;
	} else if (!bus) {
		map->reg_read = config->reg_read;
		map->reg_write = config->reg_write;
		map->reg_update_bits = config->reg_update_bits;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;
		map->reg_update_bits = bus->reg_update_bits;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
		/* Bulk read/write */
		map->read = bus->read;
		map->write = bus->write;

		reg_endian = regmap_get_reg_endian(bus, config);
		val_endian = regmap_get_val_endian(dev, bus, config);
	}

	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		case 17:
			map->format.format_write = regmap_format_7_17_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 12:
		switch (config->val_bits) {
		case 20:
			map->format.format_write = regmap_format_12_20_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 24:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_24_be;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	default:
		goto err_hwlock;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 24:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_24_be;
			map->format.parse_val = regmap_parse_24_be;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_hwlock;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_hwlock;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_hwlock;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %u < %u\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %u > %u\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/*
		 * Make sure that this register range has no selector or
		 * data window within its boundary.
		 */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned int sel_reg = config->ranges[j].selector_reg;
			unsigned int win_min = config->ranges[j].window_start;
			unsigned int win_max = win_min +
					       config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	} else {
		regmap_debugfs_init(map);
	}

	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_hwlock:
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
err_name:
	kfree_const(map->name);
err_map:
	kfree(map);
err:
	if (bus && bus->free_on_exit)
		kfree(bus);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);
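/*
 * Example (editor's sketch, not in the original source): drivers rarely
 * call __regmap_init() directly; they use a bus-specific wrapper such as
 * devm_regmap_init_i2c(), which ends up here. A minimal hypothetical I2C
 * client probe might look like:
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 8,
 *		.max_register = 0x7f,
 *		.cache_type = REGCACHE_MAPLE,
 *	};
 *
 *	static int foo_probe(struct i2c_client *i2c)
 *	{
 *		struct regmap *map;
 *
 *		map = devm_regmap_init_i2c(i2c, &foo_regmap_config);
 *		if (IS_ERR(map))
 *			return PTR_ERR(map);
 *		...
 *	}
 */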

static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);

static void regmap_field_init(struct regmap_field *rm_field,
			      struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);

	WARN_ONCE(rm_field->mask == 0, "invalid empty mask defined\n");

	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
					     struct regmap *regmap,
					     struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
						     sizeof(*rm_field),
						     GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
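/*
 * Example (editor's sketch, not in the original source): reg_field
 * descriptions are usually built with the REG_FIELD() macro. For a
 * hypothetical 8-bit register 0x04 whose bits [3:1] hold a gain value:
 *
 *	static const struct reg_field foo_gain_field = REG_FIELD(0x04, 1, 3);
 *
 *	struct regmap_field *gain;
 *
 *	gain = devm_regmap_field_alloc(dev, map, foo_gain_field);
 *	if (IS_ERR(gain))
 *		return PTR_ERR(gain);
 *
 *	regmap_field_write(gain, 5);
 *
 * regmap_field_write() performs a read-modify-write of just the field,
 * leaving the other bits of register 0x04 untouched.
 */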


/**
 * regmap_field_bulk_alloc() - Allocate and initialise bulk register fields.
 *
 * @regmap: regmap bank in which this register field is located.
 * @rm_field: regmap register fields within the bank.
 * @reg_field: Register fields within the bank.
 * @num_fields: Number of register fields.
 *
 * The return value will be -ENOMEM on error or zero for success.
 * Newly allocated regmap_fields should be freed by calling
 * regmap_field_bulk_free().
 */
int regmap_field_bulk_alloc(struct regmap *regmap,
			    struct regmap_field **rm_field,
			    const struct reg_field *reg_field,
			    int num_fields)
{
	struct regmap_field *rf;
	int i;

	rf = kcalloc(num_fields, sizeof(*rf), GFP_KERNEL);
	if (!rf)
		return -ENOMEM;

	for (i = 0; i < num_fields; i++) {
		regmap_field_init(&rf[i], regmap, reg_field[i]);
		rm_field[i] = &rf[i];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc);

/**
 * devm_regmap_field_bulk_alloc() - Allocate and initialise bulk register
 * fields.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @rm_field: regmap register fields within the bank.
 * @reg_field: Register fields within the bank.
 * @num_fields: Number of register fields.
 *
 * The return value will be -ENOMEM on error or zero for success.
 * Newly allocated regmap_fields will be automatically freed by the
 * device management code.
 */
int devm_regmap_field_bulk_alloc(struct device *dev,
				 struct regmap *regmap,
				 struct regmap_field **rm_field,
				 const struct reg_field *reg_field,
				 int num_fields)
{
	struct regmap_field *rf;
	int i;

	rf = devm_kcalloc(dev, num_fields, sizeof(*rf), GFP_KERNEL);
	if (!rf)
		return -ENOMEM;

	for (i = 0; i < num_fields; i++) {
		regmap_field_init(&rf[i], regmap, reg_field[i]);
		rm_field[i] = &rf[i];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_alloc);

/**
 * regmap_field_bulk_free() - Free register fields allocated using
 * regmap_field_bulk_alloc().
 *
 * @field: regmap fields which should be freed.
 */
void regmap_field_bulk_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_bulk_free);

/**
 * devm_regmap_field_bulk_free() - Free bulk register fields allocated using
 * devm_regmap_field_bulk_alloc().
 *
 * @dev: Device that will be interacted with
 * @field: regmap fields which should be freed.
 *
 * Free register fields allocated using devm_regmap_field_bulk_alloc().
 * Usually drivers need not call this function, as the memory allocated
 * via devm will be freed as per the device-driver life-cycle.
 */
void devm_regmap_field_bulk_free(struct device *dev,
				 struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_free);

/**
 * devm_regmap_field_free() - Free a register field allocated using
 * devm_regmap_field_alloc().
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free a register field allocated using devm_regmap_field_alloc(). Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per the device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
			    struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field should be freed by the
 * user once it is finished with it, using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
					struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free a register field allocated using
 * regmap_field_alloc().
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration. Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache. This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	int ret;

	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->max_register_is_set = map->max_register ?: config->max_register_is_0;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	ret = regmap_set_name(map, config);
	if (ret)
		return ret;

	regmap_debugfs_init(map);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regmap_detach_dev(map->dev, map);
	regcache_exit(map);

	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
	if (map->lock == regmap_lock_mutex)
		mutex_destroy(&map->mutex);
	kfree_const(map->name);
	kfree(map->patch);
	if (map->bus && map->bus->free_on_exit)
		kfree(map->bus);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;
	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return (*r)->name && !strcmp((*r)->name, data);
	else
		return 1;
}

/**
 * dev_get_regmap() - Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL. If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used. Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);
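/*
 * Example (editor's sketch, not in the original source): a common use is
 * a child of an MFD device borrowing its parent's register map in probe:
 *
 *	struct regmap *map = dev_get_regmap(pdev->dev.parent, NULL);
 *
 *	if (!map)
 *		return -ENODEV;
 *
 * where pdev is the child's hypothetical platform_device.
 */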

/**
 * regmap_get_device() - Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/*
	 * It is possible to have the selector register inside the data
	 * window. In that case, the selector register is located on every
	 * page and needs no page switching when accessed alone.
	 */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}
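/*
 * Example (editor's sketch, not in the original source): the paging
 * handled above is described by regmap_range_cfg entries in the config.
 * A hypothetical device exposing registers 0x100-0x4ff through a 256
 * register window at 0x10, selected by a page register at 0x0f, could
 * declare:
 *
 *	static const struct regmap_range_cfg foo_ranges[] = {
 *		{
 *			.range_min      = 0x100,
 *			.range_max      = 0x4ff,
 *			.selector_reg   = 0x0f,
 *			.selector_mask  = 0x3,
 *			.selector_shift = 0,
 *			.window_start   = 0x10,
 *			.window_len     = 0x100,
 *		},
 *	};
 *
 * With .ranges = foo_ranges and .num_ranges = ARRAY_SIZE(foo_ranges) in
 * the regmap_config, a write to e.g. 0x2a0 updates the selector to page
 * 1 and then accesses offset 0x10 + 0xa0 inside the window.
 */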
1586
1587
static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
1588
unsigned long mask)
1589
{
1590
u8 *buf;
1591
int i;
1592
1593
if (!mask || !map->work_buf)
1594
return;
1595
1596
buf = map->work_buf;
1597
1598
for (i = 0; i < max_bytes; i++)
1599
buf[i] |= (mask >> (8 * i)) & 0xff;
1600
}
1601
1602
static unsigned int regmap_reg_addr(struct regmap *map, unsigned int reg)
1603
{
1604
reg += map->reg_base;
1605
1606
if (map->format.reg_shift > 0)
1607
reg >>= map->format.reg_shift;
1608
else if (map->format.reg_shift < 0)
1609
reg <<= -(map->format.reg_shift);
1610
1611
return reg;
1612
}
1613
1614
static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
1615
const void *val, size_t val_len, bool noinc)
1616
{
1617
struct regmap_range_node *range;
1618
unsigned long flags;
1619
void *work_val = map->work_buf + map->format.reg_bytes +
1620
map->format.pad_bytes;
1621
void *buf;
1622
int ret = -ENOTSUPP;
1623
size_t len;
1624
int i;
1625
1626
/* Check for unwritable or noinc registers in range
1627
* before we start
1628
*/
1629
if (!regmap_writeable_noinc(map, reg)) {
1630
for (i = 0; i < val_len / map->format.val_bytes; i++) {
1631
unsigned int element =
1632
reg + regmap_get_offset(map, i);
1633
if (!regmap_writeable(map, element) ||
1634
regmap_writeable_noinc(map, element))
1635
return -EINVAL;
1636
}
1637
}
1638
1639
if (!map->cache_bypass && map->format.parse_val) {
1640
unsigned int ival, offset;
1641
int val_bytes = map->format.val_bytes;
1642
1643
/* Cache the last written value for noinc writes */
1644
i = noinc ? val_len - val_bytes : 0;
1645
for (; i < val_len; i += val_bytes) {
1646
ival = map->format.parse_val(val + i);
1647
offset = noinc ? 0 : regmap_get_offset(map, i / val_bytes);
1648
ret = regcache_write(map, reg + offset, ival);
1649
if (ret) {
1650
dev_err(map->dev,
1651
"Error in caching of register: %x ret: %d\n",
1652
reg + offset, ret);
1653
return ret;
1654
}
1655
}
1656
if (map->cache_only) {
1657
map->cache_dirty = true;
1658
return 0;
1659
}
1660
}
1661
1662
range = _regmap_range_lookup(map, reg);
1663
if (range) {
1664
int val_num = val_len / map->format.val_bytes;
1665
int win_offset = (reg - range->range_min) % range->window_len;
1666
int win_residue = range->window_len - win_offset;
1667
1668
/* If the write goes beyond the end of the window split it */
1669
while (val_num > win_residue) {
1670
dev_dbg(map->dev, "Writing window %d/%zu\n",
1671
win_residue, val_len / map->format.val_bytes);
1672
ret = _regmap_raw_write_impl(map, reg, val,
1673
win_residue *
1674
map->format.val_bytes, noinc);
1675
if (ret != 0)
1676
return ret;
1677
1678
reg += win_residue;
1679
val_num -= win_residue;
1680
val += win_residue * map->format.val_bytes;
1681
val_len -= win_residue * map->format.val_bytes;
1682
1683
win_offset = (reg - range->range_min) %
1684
range->window_len;
1685
win_residue = range->window_len - win_offset;
1686
}
1687
1688
ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
1689
if (ret != 0)
1690
return ret;
1691
}
1692
1693
reg = regmap_reg_addr(map, reg);
1694
map->format.format_reg(map->work_buf, reg, map->reg_shift);
1695
regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
1696
map->write_flag_mask);
1697
1698
/*
1699
* Essentially all I/O mechanisms will be faster with a single
1700
* buffer to write. Since register syncs often generate raw
1701
* writes of single registers optimise that case.
1702
*/
1703
if (val != work_val && val_len == map->format.val_bytes) {
1704
memcpy(work_val, val, map->format.val_bytes);
1705
val = work_val;
1706
}
1707
1708
if (map->async && map->bus && map->bus->async_write) {
1709
struct regmap_async *async;
1710
1711
trace_regmap_async_write_start(map, reg, val_len);
1712
1713
spin_lock_irqsave(&map->async_lock, flags);
1714
async = list_first_entry_or_null(&map->async_free,
1715
struct regmap_async,
1716
list);
1717
if (async)
1718
list_del(&async->list);
1719
spin_unlock_irqrestore(&map->async_lock, flags);
1720
1721
if (!async) {
1722
async = map->bus->async_alloc();
1723
if (!async)
1724
return -ENOMEM;
1725
1726
async->work_buf = kzalloc(map->format.buf_size,
1727
GFP_KERNEL | GFP_DMA);
1728
if (!async->work_buf) {
1729
kfree(async);
1730
return -ENOMEM;
1731
}
1732
}
1733
1734
async->map = map;
1735
1736
/* If the caller supplied the value we can use it safely. */
1737
memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
1738
map->format.reg_bytes + map->format.val_bytes);
1739
1740
spin_lock_irqsave(&map->async_lock, flags);
1741
list_add_tail(&async->list, &map->async_list);
1742
spin_unlock_irqrestore(&map->async_lock, flags);
1743
1744
if (val != work_val)
1745
ret = map->bus->async_write(map->bus_context,
1746
async->work_buf,
1747
map->format.reg_bytes +
1748
map->format.pad_bytes,
1749
val, val_len, async);
1750
else
1751
ret = map->bus->async_write(map->bus_context,
1752
async->work_buf,
1753
map->format.reg_bytes +
1754
map->format.pad_bytes +
1755
val_len, NULL, 0, async);
1756
1757
if (ret != 0) {
1758
dev_err(map->dev, "Failed to schedule write: %d\n",
1759
ret);
1760
1761
spin_lock_irqsave(&map->async_lock, flags);
1762
list_move(&async->list, &map->async_free);
1763
spin_unlock_irqrestore(&map->async_lock, flags);
1764
}
1765
1766
return ret;
1767
}
1768
1769
trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
1770
1771
/* If we're doing a single register write we can probably just
1772
* send the work_buf directly, otherwise try to do a gather
1773
* write.
1774
*/
1775
if (val == work_val)
1776
ret = map->write(map->bus_context, map->work_buf,
1777
map->format.reg_bytes +
1778
map->format.pad_bytes +
1779
val_len);
1780
else if (map->bus && map->bus->gather_write)
1781
ret = map->bus->gather_write(map->bus_context, map->work_buf,
1782
map->format.reg_bytes +
1783
map->format.pad_bytes,
1784
val, val_len);
1785
else
1786
ret = -ENOTSUPP;
1787
1788
/* If that didn't work fall back on linearising by hand. */
1789
if (ret == -ENOTSUPP) {
1790
len = map->format.reg_bytes + map->format.pad_bytes + val_len;
1791
buf = kzalloc(len, GFP_KERNEL);
1792
if (!buf)
1793
return -ENOMEM;
1794
1795
memcpy(buf, map->work_buf, map->format.reg_bytes);
1796
memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
1797
val, val_len);
1798
ret = map->write(map->bus_context, buf, len);
1799
1800
kfree(buf);
1801
} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
1802
/* regcache_drop_region() takes lock that we already have,
1803
* thus call map->cache_ops->drop() directly
1804
*/
1805
if (map->cache_ops && map->cache_ops->drop)
1806
map->cache_ops->drop(map, reg, reg + 1);
1807
}
1808
1809
trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
1810
1811
return ret;
1812
}
1813
1814
/**
1815
* regmap_can_raw_write - Test if regmap_raw_write() is supported
1816
*
1817
* @map: Map to check.
1818
*/
1819
bool regmap_can_raw_write(struct regmap *map)
1820
{
1821
return map->write && map->format.format_val && map->format.format_reg;
1822
}
1823
EXPORT_SYMBOL_GPL(regmap_can_raw_write);
1824
1825
/**
1826
* regmap_get_raw_read_max - Get the maximum size we can read
1827
*
1828
* @map: Map to check.
1829
*/
1830
size_t regmap_get_raw_read_max(struct regmap *map)
1831
{
1832
return map->max_raw_read;
1833
}
1834
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
1835
1836
/**
1837
* regmap_get_raw_write_max - Get the maximum size we can read
1838
*
1839
* @map: Map to check.
1840
*/
1841
size_t regmap_get_raw_write_max(struct regmap *map)
1842
{
1843
return map->max_raw_write;
1844
}
1845
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
1846
1847
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
1848
unsigned int val)
1849
{
1850
int ret;
1851
struct regmap_range_node *range;
1852
struct regmap *map = context;
1853
1854
WARN_ON(!map->format.format_write);
1855
1856
range = _regmap_range_lookup(map, reg);
1857
if (range) {
1858
ret = _regmap_select_page(map, &reg, range, 1);
1859
if (ret != 0)
1860
return ret;
1861
}
1862
1863
reg = regmap_reg_addr(map, reg);
1864
map->format.format_write(map, reg, val);
1865
1866
trace_regmap_hw_write_start(map, reg, 1);
1867
1868
ret = map->write(map->bus_context, map->work_buf, map->format.buf_size);
1869
1870
trace_regmap_hw_write_done(map, reg, 1);
1871
1872
return ret;
1873
}
1874
1875
static int _regmap_bus_reg_write(void *context, unsigned int reg,
1876
unsigned int val)
1877
{
1878
struct regmap *map = context;
1879
struct regmap_range_node *range;
1880
int ret;
1881
1882
range = _regmap_range_lookup(map, reg);
1883
if (range) {
1884
ret = _regmap_select_page(map, &reg, range, 1);
1885
if (ret != 0)
1886
return ret;
1887
}
1888
1889
reg = regmap_reg_addr(map, reg);
1890
return map->bus->reg_write(map->bus_context, reg, val);
1891
}
1892
1893
static int _regmap_bus_raw_write(void *context, unsigned int reg,
1894
unsigned int val)
1895
{
1896
struct regmap *map = context;
1897
1898
WARN_ON(!map->format.format_val);
1899
1900
map->format.format_val(map->work_buf + map->format.reg_bytes
1901
+ map->format.pad_bytes, val, 0);
1902
return _regmap_raw_write_impl(map, reg,
1903
map->work_buf +
1904
map->format.reg_bytes +
1905
map->format.pad_bytes,
1906
map->format.val_bytes,
1907
false);
1908
}
1909
1910
static inline void *_regmap_map_get_context(struct regmap *map)
1911
{
1912
return (map->bus || (!map->bus && map->read)) ? map : map->bus_context;
1913
}
1914
1915
int _regmap_write(struct regmap *map, unsigned int reg,
1916
unsigned int val)
1917
{
1918
int ret;
1919
void *context = _regmap_map_get_context(map);
1920
1921
if (!regmap_writeable(map, reg))
1922
return -EIO;
1923
1924
if (!map->cache_bypass && !map->defer_caching) {
1925
ret = regcache_write(map, reg, val);
1926
if (ret != 0)
1927
return ret;
1928
if (map->cache_only) {
1929
map->cache_dirty = true;
1930
return 0;
1931
}
1932
}
1933
1934
ret = map->reg_write(context, reg, val);
1935
if (ret == 0) {
1936
if (regmap_should_log(map))
1937
dev_info(map->dev, "%x <= %x\n", reg, val);
1938
1939
trace_regmap_reg_write(map, reg, val);
1940
}
1941
1942
return ret;
1943
}
1944
1945
/**
1946
* regmap_write() - Write a value to a single register
1947
*
1948
* @map: Register map to write to
1949
* @reg: Register to write to
1950
* @val: Value to be written
1951
*
1952
* A value of zero will be returned on success, a negative errno will
1953
* be returned in error cases.
1954
*/
1955
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
1956
{
1957
int ret;
1958
1959
if (!IS_ALIGNED(reg, map->reg_stride))
1960
return -EINVAL;
1961
1962
map->lock(map->lock_arg);
1963
1964
ret = _regmap_write(map, reg, val);
1965
1966
map->unlock(map->lock_arg);
1967
1968
return ret;
1969
}
1970
EXPORT_SYMBOL_GPL(regmap_write);
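/*
 * Example (editor's sketch, not in the original source): typical single
 * register access from driver code, with hypothetical FOO_* defines:
 *
 *	ret = regmap_write(map, FOO_REG_CTRL, 0x01);
 *	if (ret)
 *		return ret;
 *
 *	ret = regmap_update_bits(map, FOO_REG_CTRL, FOO_CTRL_EN,
 *				 FOO_CTRL_EN);
 *
 * regmap_update_bits(), built on the _regmap_update_bits() helper
 * declared above, performs the locked read-modify-write that drivers
 * would otherwise open-code around regmap_read()/regmap_write().
 */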
1971
1972
/**
1973
* regmap_write_async() - Write a value to a single register asynchronously
1974
*
1975
* @map: Register map to write to
1976
* @reg: Register to write to
1977
* @val: Value to be written
1978
*
1979
* A value of zero will be returned on success, a negative errno will
1980
* be returned in error cases.
1981
*/
1982
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
1983
{
1984
int ret;
1985
1986
if (!IS_ALIGNED(reg, map->reg_stride))
1987
return -EINVAL;
1988
1989
map->lock(map->lock_arg);
1990
1991
map->async = true;
1992
1993
ret = _regmap_write(map, reg, val);
1994
1995
map->async = false;
1996
1997
map->unlock(map->lock_arg);
1998
1999
return ret;
2000
}
2001
EXPORT_SYMBOL_GPL(regmap_write_async);

int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len, bool noinc)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	size_t chunk_count, chunk_bytes;
	size_t chunk_regs = val_count;
	int ret, i;

	if (!val_count)
		return -EINVAL;

	if (map->use_single_write)
		chunk_regs = 1;
	else if (map->max_raw_write && val_len > map->max_raw_write)
		chunk_regs = map->max_raw_write / val_bytes;

	chunk_count = val_count / chunk_regs;
	chunk_bytes = chunk_regs * val_bytes;

	/* Write as many bytes as possible with chunk_size */
	for (i = 0; i < chunk_count; i++) {
		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
		if (ret)
			return ret;

		reg += regmap_get_offset(map, chunk_regs);
		val += chunk_bytes;
		val_len -= chunk_bytes;
	}

	/* Write remaining bytes */
	if (val_len)
		ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);

	return ret;
}

/**
 * regmap_raw_write() - Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len, false);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);
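
/*
 * Example: a firmware-download style use of regmap_raw_write(). The
 * buffer must already be laid out in the device's wire format and its
 * length must be a multiple of the map's value size. The struct
 * firmware obtained via request_firmware() and MYDEV_DSP_BASE are
 * hypothetical here.
 *
 *	ret = regmap_raw_write(mydev->regmap, MYDEV_DSP_BASE,
 *			       fw->data, fw->size);
 *	if (ret)
 *		dev_err(mydev->dev, "firmware download failed: %d\n", ret);
 */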
2076
2077
static int regmap_noinc_readwrite(struct regmap *map, unsigned int reg,
2078
void *val, unsigned int val_len, bool write)
2079
{
2080
size_t val_bytes = map->format.val_bytes;
2081
size_t val_count = val_len / val_bytes;
2082
unsigned int lastval;
2083
u8 *u8p;
2084
u16 *u16p;
2085
u32 *u32p;
2086
int ret;
2087
int i;
2088
2089
switch (val_bytes) {
2090
case 1:
2091
u8p = val;
2092
if (write)
2093
lastval = (unsigned int)u8p[val_count - 1];
2094
break;
2095
case 2:
2096
u16p = val;
2097
if (write)
2098
lastval = (unsigned int)u16p[val_count - 1];
2099
break;
2100
case 4:
2101
u32p = val;
2102
if (write)
2103
lastval = (unsigned int)u32p[val_count - 1];
2104
break;
2105
default:
2106
return -EINVAL;
2107
}
2108
2109
/*
2110
* Update the cache with the last value we write, the rest is just
2111
* gone down in the hardware FIFO. We can't cache FIFOs. This makes
2112
* sure a single read from the cache will work.
2113
*/
2114
if (write) {
2115
if (!map->cache_bypass && !map->defer_caching) {
2116
ret = regcache_write(map, reg, lastval);
2117
if (ret != 0)
2118
return ret;
2119
if (map->cache_only) {
2120
map->cache_dirty = true;
2121
return 0;
2122
}
2123
}
2124
ret = map->bus->reg_noinc_write(map->bus_context, reg, val, val_count);
2125
} else {
2126
ret = map->bus->reg_noinc_read(map->bus_context, reg, val, val_count);
2127
}
2128
2129
if (!ret && regmap_should_log(map)) {
2130
dev_info(map->dev, "%x %s [", reg, write ? "<=" : "=>");
2131
for (i = 0; i < val_count; i++) {
2132
switch (val_bytes) {
2133
case 1:
2134
pr_cont("%x", u8p[i]);
2135
break;
2136
case 2:
2137
pr_cont("%x", u16p[i]);
2138
break;
2139
case 4:
2140
pr_cont("%x", u32p[i]);
2141
break;
2142
default:
2143
break;
2144
}
2145
if (i == (val_count - 1))
2146
pr_cont("]\n");
2147
else
2148
pr_cont(",");
2149
}
2150
}
2151
2152
return 0;
2153
}
2154
2155
/**
2156
* regmap_noinc_write(): Write data to a register without incrementing the
2157
* register number
2158
*
2159
* @map: Register map to write to
2160
* @reg: Register to write to
2161
* @val: Pointer to data buffer
2162
* @val_len: Length of output buffer in bytes.
2163
*
2164
* The regmap API usually assumes that bulk bus write operations will write a
2165
* range of registers. Some devices have certain registers for which a write
2166
* operation can write to an internal FIFO.
2167
*
2168
* The target register must be volatile but registers after it can be
2169
* completely unrelated cacheable registers.
2170
*
2171
* This will attempt multiple writes as required to write val_len bytes.
2172
*
2173
* A value of zero will be returned on success, a negative errno will be
2174
* returned in error cases.
2175
*/
2176
int regmap_noinc_write(struct regmap *map, unsigned int reg,
2177
const void *val, size_t val_len)
2178
{
2179
size_t write_len;
2180
int ret;
2181
2182
if (!map->write && !(map->bus && map->bus->reg_noinc_write))
2183
return -EINVAL;
2184
if (val_len % map->format.val_bytes)
2185
return -EINVAL;
2186
if (!IS_ALIGNED(reg, map->reg_stride))
2187
return -EINVAL;
2188
if (val_len == 0)
2189
return -EINVAL;
2190
2191
map->lock(map->lock_arg);
2192
2193
if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
2194
ret = -EINVAL;
2195
goto out_unlock;
2196
}
2197
2198
/*
2199
* Use the accelerated operation if we can. The val drops the const
2200
* typing in order to facilitate code reuse in regmap_noinc_readwrite().
2201
*/
2202
if (map->bus->reg_noinc_write) {
2203
ret = regmap_noinc_readwrite(map, reg, (void *)val, val_len, true);
2204
goto out_unlock;
2205
}
2206
2207
while (val_len) {
2208
if (map->max_raw_write && map->max_raw_write < val_len)
2209
write_len = map->max_raw_write;
2210
else
2211
write_len = val_len;
2212
ret = _regmap_raw_write(map, reg, val, write_len, true);
2213
if (ret)
2214
goto out_unlock;
2215
val = ((u8 *)val) + write_len;
2216
val_len -= write_len;
2217
}
2218
2219
out_unlock:
2220
map->unlock(map->lock_arg);
2221
return ret;
2222
}
2223
EXPORT_SYMBOL_GPL(regmap_noinc_write);
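
/*
 * Example: pushing a buffer into a device FIFO. For this to succeed the
 * FIFO register must be volatile and writeable without increment (via
 * wr_noinc_table or the writeable_noinc_reg callback in regmap_config);
 * MYDEV_TX_FIFO and the buffer are hypothetical.
 *
 *	ret = regmap_noinc_write(mydev->regmap, MYDEV_TX_FIFO,
 *				 txbuf, txbuf_len);
 */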

/**
 * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
 * register field.
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether the update should be done asynchronously
 * @force: Boolean indicating whether to force the write even if the value
 *	   is unchanged
 *
 * Perform a read/modify/write cycle on the register field with change,
 * async, force options.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, field->reg,
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
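
/*
 * Example: letting a regmap_field do the shifting and masking. The
 * field is described once with REG_FIELD() and then written with
 * regmap_field_write(), a convenience wrapper around this function;
 * MYDEV_CFG and the bit range 7..4 are hypothetical.
 *
 *	struct reg_field desc = REG_FIELD(MYDEV_CFG, 4, 7);
 *	struct regmap_field *f;
 *
 *	f = devm_regmap_field_alloc(dev, mydev->regmap, desc);
 *	if (IS_ERR(f))
 *		return PTR_ERR(f);
 *
 *	ret = regmap_field_write(f, 0x5);
 */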

/**
 * regmap_field_test_bits() - Check if all specified bits are set in a
 * register field.
 *
 * @field: Register field to operate on
 * @bits: Bits to test
 *
 * Returns a negative error number if the underlying regmap_field_read()
 * fails, 0 if at least one of the tested bits is not set and 1 if all
 * tested bits are set.
 */
int regmap_field_test_bits(struct regmap_field *field, unsigned int bits)
{
	unsigned int val, ret;

	ret = regmap_field_read(field, &val);
	if (ret)
		return ret;

	return (val & bits) == bits;
}
EXPORT_SYMBOL_GPL(regmap_field_test_bits);

/**
 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
 * register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether the update should be done asynchronously
 * @force: Boolean indicating whether to force the write even if the value
 *	   is unchanged
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				   unsigned int mask, unsigned int val,
				   bool *change, bool async, bool force)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap,
				       field->reg + (field->id_offset * id),
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);

/**
 * regmap_bulk_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in a single transfer or in multiple transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write, for them we have a series of
	 * single write operations.
	 */
	if (!map->write || !map->format.parse_inplace) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		wval = kmemdup_array(val, val_count, val_bytes, map->alloc_flags);
		if (!wval)
			return -ENOMEM;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);

		kfree(wval);
	}

	if (!ret)
		trace_regmap_bulk_write(map, reg, val, val_bytes * val_count);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
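
/*
 * Example: writing four consecutive 16-bit registers in one call. The
 * values are supplied in native CPU endianness and regmap formats them
 * for the bus; MYDEV_COEFF_BASE and the coefficients are hypothetical.
 *
 *	u16 coeffs[4] = { 0x0102, 0x0304, 0x0506, 0x0708 };
 *
 *	ret = regmap_bulk_write(mydev->regmap, MYDEV_COEFF_BASE,
 *				coeffs, ARRAY_SIZE(coeffs));
 */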

/*
 * _regmap_raw_multi_reg_write()
 *
 * The (register, newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative. The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		trace_regmap_hw_write_start(map, reg, 1);
		reg = regmap_reg_addr(map, reg);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;
		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * The set of registers is not necessarily in order, but since
	 * the order of writes must be preserved this algorithm chops
	 * the set each time the page changes. This also applies if
	 * there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n=0.
			 * This can't occur with page breaks as we
			 * never write on the first iteration.
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us) {
				if (map->can_sleep)
					fsleep(regs[i].delay_us);
				else
					udelay(regs[i].delay_us);
			}

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}

		}

	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us) {
				if (map->can_sleep)
					fsleep(regs[i].delay_us);
				else
					udelay(regs[i].delay_us);
			}
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence)*num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}

/**
 * regmap_multi_reg_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device where the set of register, value
 * pairs are supplied in any order, possibly not all in a single range.
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed. However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
 * must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
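
/*
 * Example: an initialisation sequence consumed by
 * regmap_multi_reg_write(). The third member of the second entry is a
 * delay_us of 250, applied after that write before the sequence
 * continues; the register names and values are hypothetical.
 *
 *	static const struct reg_sequence mydev_init_seq[] = {
 *		{ MYDEV_PLL_CTRL, 0x0021 },
 *		{ MYDEV_PLL_DIV,  0x0004, 250 },
 *		{ MYDEV_CLK_EN,   0x0001 },
 *	};
 *
 *	ret = regmap_multi_reg_write(mydev->regmap, mydev_init_seq,
 *				     ARRAY_SIZE(mydev_init_seq));
 */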

/**
 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
 * device but not the cache
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device but not the cache where the set
 * of registers is supplied in any order.
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client devices
 * that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
				    const struct reg_sequence *regs,
				    int num_regs)
{
	int ret;
	bool bypass;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	map->cache_bypass = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);

/**
 * regmap_raw_write_async() - Write raw values to one or more registers
 *                            asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device. Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len, false);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);
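
/*
 * Example: overlapping several block writes on a bus with async support
 * (such as SPI) and then draining them. Each buffer must remain valid
 * until regmap_async_complete() returns, which also reports the first
 * error from any of the scheduled writes; the names are hypothetical.
 *
 *	for (i = 0; i < nblocks; i++) {
 *		ret = regmap_raw_write_async(mydev->regmap,
 *					     MYDEV_MEM_BASE + i * block_regs,
 *					     blocks[i], block_bytes);
 *		if (ret)
 *			break;
 *	}
 *
 *	err = regmap_async_complete(mydev->regmap);
 */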

static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len, bool noinc)
{
	struct regmap_range_node *range;
	int ret;

	if (!map->read)
		return -EINVAL;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  noinc ? 1 : val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	reg = regmap_reg_addr(map, reg);
	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->read(map->bus_context, map->work_buf,
			map->format.reg_bytes + map->format.pad_bytes,
			val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;
	struct regmap_range_node *range;
	int ret;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	reg = regmap_reg_addr(map, reg);
	return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	int ret;
	struct regmap *map = context;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;

	if (!map->format.parse_val)
		return -EINVAL;

	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
	if (ret == 0)
		*val = map->format.parse_val(work_val);

	return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
		if (regmap_should_log(map))
			dev_info(map->dev, "%x => %x\n", reg, *val);

		trace_regmap_reg_read(map, reg, *val);

		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}

/**
 * regmap_read() - Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_read(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);
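
/*
 * Example: reading a status register; the value arrives already parsed
 * into host-endian form. MYDEV_STATUS and MYDEV_STATUS_READY are
 * hypothetical.
 *
 *	unsigned int status;
 *
 *	ret = regmap_read(mydev->regmap, MYDEV_STATUS, &status);
 *	if (ret)
 *		return ret;
 *	if (!(status & MYDEV_STATUS_READY))
 *		return -EBUSY;
 */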

/**
 * regmap_read_bypassed() - Read a value from a single register directly
 * from the device, bypassing the cache
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;
	bool bypass, cache_only;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	cache_only = map->cache_only;
	map->cache_bypass = true;
	map->cache_only = false;

	ret = _regmap_read(map, reg, val);

	map->cache_bypass = bypass;
	map->cache_only = cache_only;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read_bypassed);

/**
 * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		size_t chunk_count, chunk_bytes;
		size_t chunk_regs = val_count;

		if (!map->cache_bypass && map->cache_only) {
			ret = -EBUSY;
			goto out;
		}

		if (!map->read) {
			ret = -ENOTSUPP;
			goto out;
		}

		if (map->use_single_read)
			chunk_regs = 1;
		else if (map->max_raw_read && val_len > map->max_raw_read)
			chunk_regs = map->max_raw_read / val_bytes;

		chunk_count = val_count / chunk_regs;
		chunk_bytes = chunk_regs * val_bytes;

		/* Read bytes that fit into whole chunks */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
			if (ret != 0)
				goto out;

			reg += regmap_get_offset(map, chunk_regs);
			val += chunk_bytes;
			val_len -= chunk_bytes;
		}

		/* Read remaining bytes */
		if (val_len) {
			ret = _regmap_raw_read(map, reg, val, val_len, false);
			if (ret != 0)
				goto out;
		}
	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);

/**
 * regmap_noinc_read(): Read data from a register without incrementing the
 * register number
 *
 * @map: Register map to read from
 * @reg: Register to read from
 * @val: Pointer to data buffer
 * @val_len: Length of output buffer in bytes.
 *
 * The regmap API usually assumes that bulk read operations will read a
 * range of registers. Some devices have certain registers for which a read
 * operation will read from an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple reads as required to read val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_read(struct regmap *map, unsigned int reg,
		      void *val, size_t val_len)
{
	size_t read_len;
	int ret;

	if (!map->read)
		return -ENOTSUPP;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * We have not defined the FIFO semantics for cache, as the
	 * cache is just one value deep. Should we return the last
	 * written value? Just avoid this by always reading the FIFO
	 * even when using the cache. Cache-only mode will not work.
	 */
	if (!map->cache_bypass && map->cache_only) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/* Use the accelerated operation if we can */
	if (map->bus->reg_noinc_read) {
		ret = regmap_noinc_readwrite(map, reg, val, val_len, false);
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_read && map->max_raw_read < val_len)
			read_len = map->max_raw_read;
		else
			read_len = val_len;
		ret = _regmap_raw_read(map, reg, val, read_len, true);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + read_len;
		val_len -= read_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_read);
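
/*
 * Example: draining a receive FIFO. The FIFO register must be volatile
 * and readable without increment (via rd_noinc_table or the
 * readable_noinc_reg callback in regmap_config); MYDEV_RX_FIFO is
 * hypothetical.
 *
 *	u8 rxbuf[64];
 *
 *	ret = regmap_noinc_read(mydev->regmap, MYDEV_RX_FIFO,
 *				rxbuf, sizeof(rxbuf));
 */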

/**
 * regmap_field_read(): Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
	int ret;
	unsigned int reg_val;
	ret = regmap_read(field->regmap, field->reg, &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);

/**
 * regmap_fields_read() - Read a value from a single register field with port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
		       unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	if (id >= field->id_size)
		return -EINVAL;

	ret = regmap_read(field->regmap,
			  field->reg + (field->id_offset * id),
			  &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);

static int _regmap_bulk_read(struct regmap *map, unsigned int reg,
			     const unsigned int *regs, void *val, size_t val_count)
{
	u32 *u32 = val;
	u16 *u16 = val;
	u8 *u8 = val;
	int ret, i;

	map->lock(map->lock_arg);

	for (i = 0; i < val_count; i++) {
		unsigned int ival;

		if (regs) {
			if (!IS_ALIGNED(regs[i], map->reg_stride)) {
				ret = -EINVAL;
				goto out;
			}
			ret = _regmap_read(map, regs[i], &ival);
		} else {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i), &ival);
		}
		if (ret != 0)
			goto out;

		switch (map->format.val_bytes) {
		case 4:
			u32[i] = ival;
			break;
		case 2:
			u16[i] = ival;
			break;
		case 1:
			u8[i] = ival;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
	}
out:
	map->unlock(map->lock_arg);
	return ret;
}

/**
 * regmap_bulk_read() - Read multiple sequential registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	if (map->read && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
		if (ret != 0)
			return ret;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
		ret = _regmap_bulk_read(map, reg, NULL, val, val_count);
	}
	if (!ret)
		trace_regmap_bulk_read(map, reg, val, val_bytes * val_count);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
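
/*
 * Example: reading a block of sequential registers into native-sized
 * values, with any endian conversion handled by the core;
 * MYDEV_ADC_BASE is hypothetical.
 *
 *	u16 samples[8];
 *
 *	ret = regmap_bulk_read(mydev->regmap, MYDEV_ADC_BASE,
 *			       samples, ARRAY_SIZE(samples));
 */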

/**
 * regmap_multi_reg_read() - Read multiple non-sequential registers from the device
 *
 * @map: Register map to read from
 * @regs: Array of registers to read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_read(struct regmap *map, const unsigned int *regs, void *val,
			  size_t val_count)
{
	if (val_count == 0)
		return -EINVAL;

	return _regmap_bulk_read(map, 0, regs, val, val_count);
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_read);

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		reg = regmap_reg_addr(map, reg);
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		if (force_write || (tmp != orig) || map->force_write_field) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}

/**
 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether the update should be done asynchronously
 * @force: Boolean indicating whether to force the write even if the value
 *	   is unchanged
 *
 * Perform a read/modify/write cycle on a register map with change, async, force
 * options.
 *
 * If async is true:
 *
 * With most buses the read must be done synchronously so this is most useful
 * for devices with a cache which do not need to interact with the hardware to
 * determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int ret;

	map->lock(map->lock_arg);

	map->async = async;

	ret = _regmap_update_bits(map, reg, mask, val, change, force);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);
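
/*
 * Example: most callers use the regmap_update_bits() wrapper, which
 * calls this function with change, async and force all defaulted;
 * MYDEV_CTRL and MYDEV_CTRL_MUTE are hypothetical.
 *
 *	ret = regmap_update_bits(mydev->regmap, MYDEV_CTRL,
 *				 MYDEV_CTRL_MUTE,
 *				 mute ? MYDEV_CTRL_MUTE : 0);
 */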

/**
 * regmap_test_bits() - Check if all specified bits are set in a register.
 *
 * @map: Register map to operate on
 * @reg: Register to read from
 * @bits: Bits to test
 *
 * Returns 0 if at least one of the tested bits is not set, 1 if all tested
 * bits are set and a negative error number if the underlying regmap_read()
 * fails.
 */
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
{
	unsigned int val, ret;

	ret = regmap_read(map, reg, &val);
	if (ret)
		return ret;

	return (val & bits) == bits;
}
EXPORT_SYMBOL_GPL(regmap_test_bits);

void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}

/**
 * regmap_async_complete - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	wait_event(map->async_waitq, regmap_async_is_done(map));

	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);

/**
 * regmap_register_patch - Register and apply register updates to be applied
 * on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply
 * corrections to the device defaults on startup, such as the updates
 * some vendors provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
		      num_regs))
		return 0;

	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);
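
/*
 * Example: registering a vendor-supplied errata sequence at probe time.
 * It is written to the device immediately and re-applied whenever the
 * cache is synchronised; the register addresses and values are
 * hypothetical.
 *
 *	static const struct reg_sequence mydev_errata[] = {
 *		{ 0x101, 0x0400 },
 *		{ 0x102, 0x0021 },
 *	};
 *
 *	ret = regmap_register_patch(mydev->regmap, mydev_errata,
 *				    ARRAY_SIZE(mydev_errata));
 */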
3445
3446
/**
3447
* regmap_get_val_bytes() - Report the size of a register value
3448
*
3449
* @map: Register map to operate on.
3450
*
3451
* Report the size of a register value, mainly intended to for use by
3452
* generic infrastructure built on top of regmap.
3453
*/
3454
int regmap_get_val_bytes(struct regmap *map)
3455
{
3456
if (map->format.format_write)
3457
return -EINVAL;
3458
3459
return map->format.val_bytes;
3460
}
3461
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
3462
3463
/**
3464
* regmap_get_max_register() - Report the max register value
3465
*
3466
* @map: Register map to operate on.
3467
*
3468
* Report the max register value, mainly intended to for use by
3469
* generic infrastructure built on top of regmap.
3470
*/
3471
int regmap_get_max_register(struct regmap *map)
3472
{
3473
return map->max_register_is_set ? map->max_register : -EINVAL;
3474
}
3475
EXPORT_SYMBOL_GPL(regmap_get_max_register);
3476
3477
/**
3478
* regmap_get_reg_stride() - Report the register address stride
3479
*
3480
* @map: Register map to operate on.
3481
*
3482
* Report the register address stride, mainly intended to for use by
3483
* generic infrastructure built on top of regmap.
3484
*/
3485
int regmap_get_reg_stride(struct regmap *map)
3486
{
3487
return map->reg_stride;
3488
}
3489
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
3490
3491
/**
3492
* regmap_might_sleep() - Returns whether a regmap access might sleep.
3493
*
3494
* @map: Register map to operate on.
3495
*
3496
* Returns true if an access to the register might sleep, else false.
3497
*/
3498
bool regmap_might_sleep(struct regmap *map)
3499
{
3500
return map->can_sleep;
3501
}
3502
EXPORT_SYMBOL_GPL(regmap_might_sleep);
3503
3504
int regmap_parse_val(struct regmap *map, const void *buf,
3505
unsigned int *val)
3506
{
3507
if (!map->format.parse_val)
3508
return -EINVAL;
3509
3510
*val = map->format.parse_val(buf);
3511
3512
return 0;
3513
}
3514
EXPORT_SYMBOL_GPL(regmap_parse_val);
3515
3516
static int __init regmap_initcall(void)
3517
{
3518
regmap_debugfs_initcall();
3519
3520
return 0;
3521
}
3522
postcore_initcall(regmap_initcall);
3523
3524