Path: blob/master/drivers/base/regmap/regcache-rbtree.c
// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - rbtree caching support
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Dimitris Papastamos <[email protected]>

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include "internal.h"

static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);

struct regcache_rbtree_node {
	/* block of adjacent registers */
	void *block;
	/* Which registers are present */
	unsigned long *cache_present;
	/* base register handled by this block */
	unsigned int base_reg;
	/* number of registers available in the block */
	unsigned int blklen;
	/* the actual rbtree node holding this block */
	struct rb_node node;
};

struct regcache_rbtree_ctx {
	struct rb_root root;
	struct regcache_rbtree_node *cached_rbnode;
};

static inline void regcache_rbtree_get_base_top_reg(
	struct regmap *map,
	struct regcache_rbtree_node *rbnode,
	unsigned int *base, unsigned int *top)
{
	*base = rbnode->base_reg;
	*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}

static unsigned int regcache_rbtree_get_register(struct regmap *map,
	struct regcache_rbtree_node *rbnode, unsigned int idx)
{
	return regcache_get_val(map, rbnode->block, idx);
}

static void regcache_rbtree_set_register(struct regmap *map,
					 struct regcache_rbtree_node *rbnode,
					 unsigned int idx, unsigned int val)
{
	set_bit(idx, rbnode->cache_present);
	regcache_set_val(map, rbnode->block, idx, val);
}

static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
							    unsigned int reg)
{
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;

	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg)
			return rbnode;
	}

	node = rbtree_ctx->root.rb_node;
	while (node) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			rbtree_ctx->cached_rbnode = rbnode;
			return rbnode;
		} else if (reg > top_reg) {
			node = node->rb_right;
		} else if (reg < base_reg) {
			node = node->rb_left;
		}
	}

	return NULL;
}

static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
				  struct regcache_rbtree_node *rbnode)
{
	struct rb_node **new, *parent;
	struct regcache_rbtree_node *rbnode_tmp;
	unsigned int base_reg_tmp, top_reg_tmp;
	unsigned int base_reg;

	parent = NULL;
	new = &root->rb_node;
	while (*new) {
		rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node);
		/* base and top registers of the current rbnode */
		regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
						 &top_reg_tmp);
		/* base register of the rbnode to be added */
		base_reg = rbnode->base_reg;
		parent = *new;
		/* if this register has already been inserted, just return */
		if (base_reg >= base_reg_tmp &&
		    base_reg <= top_reg_tmp)
			return 0;
		else if (base_reg > top_reg_tmp)
			new = &((*new)->rb_right);
		else if (base_reg < base_reg_tmp)
			new = &((*new)->rb_left);
	}

	/* insert the node into the rbtree */
	rb_link_node(&rbnode->node, parent, new);
	rb_insert_color(&rbnode->node, root);

	return 1;
}

#ifdef CONFIG_DEBUG_FS
static int rbtree_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct regcache_rbtree_node *n;
	struct rb_node *node;
	unsigned int base, top;
	size_t mem_size;
	int nodes = 0;
	int registers = 0;
	int this_registers, average;

	map->lock(map->lock_arg);

	mem_size = sizeof(*rbtree_ctx);

	for (node = rb_first(&rbtree_ctx->root); node != NULL;
	     node = rb_next(node)) {
		n = rb_entry(node, struct regcache_rbtree_node, node);
		mem_size += sizeof(*n);
		mem_size += (n->blklen * map->cache_word_size);
		mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);

		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
		this_registers = ((top - base) / map->reg_stride) + 1;
		seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

		nodes++;
		registers += this_registers;
	}

	if (nodes)
		average = registers / nodes;
	else
		average = 0;

	seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
		   nodes, registers, average, mem_size);

	map->unlock(map->lock_arg);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(rbtree);

static void rbtree_debugfs_init(struct regmap *map)
{
	debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#endif

static int regcache_rbtree_init(struct regmap *map)
{
	struct regcache_rbtree_ctx *rbtree_ctx;

	map->cache = kmalloc(sizeof *rbtree_ctx, map->alloc_flags);
	if (!map->cache)
		return -ENOMEM;

	rbtree_ctx = map->cache;
	rbtree_ctx->root = RB_ROOT;
	rbtree_ctx->cached_rbnode = NULL;

	return 0;
}

static int regcache_rbtree_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbtree_node;

	/* if we've already been called then just return */
	rbtree_ctx = map->cache;
	if (!rbtree_ctx)
		return 0;

	/* free up the rbtree */
	next = rb_first(&rbtree_ctx->root);
	while (next) {
		rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
		next = rb_next(&rbtree_node->node);
		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
		kfree(rbtree_node->cache_present);
		kfree(rbtree_node->block);
		kfree(rbtree_node);
	}

	/* release the resources */
	kfree(map->cache);
	map->cache = NULL;

	return 0;
}

static int regcache_rbtree_populate(struct regmap *map)
{
	unsigned int i;
	int ret;

	for (i = 0; i < map->num_reg_defaults; i++) {
		ret = regcache_rbtree_write(map,
					    map->reg_defaults[i].reg,
					    map->reg_defaults[i].def);
		if (ret)
			return ret;
	}

	return 0;
}

static int regcache_rbtree_read(struct regmap *map,
				unsigned int reg, unsigned int *value)
{
	struct regcache_rbtree_node *rbnode;
	unsigned int reg_tmp;

	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		if (!test_bit(reg_tmp, rbnode->cache_present))
			return -ENOENT;
		*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
	} else {
		return -ENOENT;
	}

	return 0;
}

static int regcache_rbtree_insert_to_block(struct regmap *map,
					   struct regcache_rbtree_node *rbnode,
					   unsigned int base_reg,
					   unsigned int top_reg,
					   unsigned int reg,
					   unsigned int value)
{
	unsigned int blklen;
	unsigned int pos, offset;
	unsigned long *present;
	u8 *blk;

	blklen = (top_reg - base_reg) / map->reg_stride + 1;
	pos = (reg - base_reg) / map->reg_stride;
	offset = (rbnode->base_reg - base_reg) / map->reg_stride;

	blk = krealloc_array(rbnode->block, blklen, map->cache_word_size, map->alloc_flags);
	if (!blk)
		return -ENOMEM;

	rbnode->block = blk;

	if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
		present = krealloc_array(rbnode->cache_present,
					 BITS_TO_LONGS(blklen), sizeof(*present),
					 map->alloc_flags);
		if (!present)
			return -ENOMEM;

		memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
		       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
		       * sizeof(*present));
	} else {
		present = rbnode->cache_present;
	}

	/* insert the register value in the correct place in the rbnode block */
	if (pos == 0) {
		memmove(blk + offset * map->cache_word_size,
			blk, rbnode->blklen * map->cache_word_size);
		bitmap_shift_left(present, present, offset, blklen);
	}

	/* update the rbnode block, its size and the base register */
	rbnode->blklen = blklen;
	rbnode->base_reg = base_reg;
	rbnode->cache_present = present;

	regcache_rbtree_set_register(map, rbnode, pos, value);
	return 0;
}

static struct regcache_rbtree_node *
regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
{
	struct regcache_rbtree_node *rbnode;
	const struct regmap_range *range;
	int i;

	rbnode = kzalloc(sizeof(*rbnode), map->alloc_flags);
	if (!rbnode)
		return NULL;

	/* If there is a read table then use it to guess at an allocation */
	if (map->rd_table) {
		for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
			if (regmap_reg_in_range(reg,
						&map->rd_table->yes_ranges[i]))
				break;
		}

		if (i != map->rd_table->n_yes_ranges) {
			range = &map->rd_table->yes_ranges[i];
			rbnode->blklen = (range->range_max - range->range_min) /
				map->reg_stride + 1;
			rbnode->base_reg = range->range_min;
		}
	}

	if (!rbnode->blklen) {
		rbnode->blklen = 1;
		rbnode->base_reg = reg;
	}

	rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
				      map->alloc_flags);
	if (!rbnode->block)
		goto err_free;

	rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen),
					sizeof(*rbnode->cache_present),
					map->alloc_flags);
	if (!rbnode->cache_present)
		goto err_free_block;

	return rbnode;

err_free_block:
	kfree(rbnode->block);
err_free:
	kfree(rbnode);
	return NULL;
}

static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
	struct rb_node *node;
	unsigned int reg_tmp;
	int ret;

	rbtree_ctx = map->cache;

	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
	} else {
		unsigned int base_reg, top_reg;
		unsigned int new_base_reg, new_top_reg;
		unsigned int min, max;
		unsigned int max_dist;
		unsigned int dist, best_dist = UINT_MAX;

		max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
			map->cache_word_size;
		if (reg < max_dist)
			min = 0;
		else
			min = reg - max_dist;
		max = reg + max_dist;

		/* look for an adjacent register to the one we are about to add */
		node = rbtree_ctx->root.rb_node;
		while (node) {
			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
					      node);

			regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
							 &base_reg, &top_reg);

			if (base_reg <= max && top_reg >= min) {
				if (reg < base_reg)
					dist = base_reg - reg;
				else if (reg > top_reg)
					dist = reg - top_reg;
				else
					dist = 0;
				if (dist < best_dist) {
					rbnode = rbnode_tmp;
					best_dist = dist;
					new_base_reg = min(reg, base_reg);
					new_top_reg = max(reg, top_reg);
				}
			}

			/*
			 * Keep looking, we want to choose the closest block,
			 * otherwise we might end up creating overlapping
			 * blocks, which breaks the rbtree.
			 */
			if (reg < base_reg)
				node = node->rb_left;
			else if (reg > top_reg)
				node = node->rb_right;
			else
				break;
		}

		if (rbnode) {
			ret = regcache_rbtree_insert_to_block(map, rbnode,
							      new_base_reg,
							      new_top_reg, reg,
							      value);
			if (ret)
				return ret;
			rbtree_ctx->cached_rbnode = rbnode;
			return 0;
		}

		/* We did not manage to find a place to insert it in
		 * an existing block so create a new rbnode.
		 */
		rbnode = regcache_rbtree_node_alloc(map, reg);
		if (!rbnode)
			return -ENOMEM;
		regcache_rbtree_set_register(map, rbnode,
					     (reg - rbnode->base_reg) / map->reg_stride,
					     value);
		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
		rbtree_ctx->cached_rbnode = rbnode;
	}

	return 0;
}

static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;
	unsigned int start, end;
	int ret;

	map->async = true;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		ret = regcache_sync_block(map, rbnode->block,
					  rbnode->cache_present,
					  rbnode->base_reg, start, end);
		if (ret != 0)
			return ret;
	}

	map->async = false;

	return regmap_async_complete(map);
}

static int regcache_rbtree_drop(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode;
	struct rb_node *node;
	unsigned int base_reg, top_reg;
	unsigned int start, end;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		bitmap_clear(rbnode->cache_present, start, end - start);
	}

	return 0;
}

struct regcache_ops regcache_rbtree_ops = {
	.type = REGCACHE_RBTREE,
	.name = "rbtree",
	.init = regcache_rbtree_init,
	.exit = regcache_rbtree_exit,
	.populate = regcache_rbtree_populate,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = rbtree_debugfs_init,
#endif
	.read = regcache_rbtree_read,
	.write = regcache_rbtree_write,
	.sync = regcache_rbtree_sync,
	.drop = regcache_rbtree_drop,
};
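
For context, a driver does not call these ops directly: it opts into this cache implementation by setting cache_type in its regmap_config, and the regcache core dispatches to regcache_rbtree_ops. Below is a minimal sketch of such a configuration; the register widths, max_register, and the config's name are illustrative only, not taken from this file.

/*
 * Usage sketch (not part of regcache-rbtree.c): select the rbtree cache
 * for a hypothetical 8-bit-register, 16-bit-value device.
 */
#include <linux/regmap.h>

static const struct regmap_config example_regmap_config = {
	.reg_bits	= 8,
	.val_bits	= 16,
	.max_register	= 0xff,
	.cache_type	= REGCACHE_RBTREE,	/* back regcache with the rbtree store above */
};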