// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - rbtree caching support
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include "internal.h"

static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);

struct regcache_rbtree_node {
	/* block of adjacent registers */
	void *block;
	/* Which registers are present */
	long *cache_present;
	/* base register handled by this block */
	unsigned int base_reg;
	/* number of registers available in the block */
	unsigned int blklen;
	/* the actual rbtree node holding this block */
	struct rb_node node;
};

struct regcache_rbtree_ctx {
	struct rb_root root;
	struct regcache_rbtree_node *cached_rbnode;
};
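
/*
 * Note: cached_rbnode is a one-entry lookup cache, not part of the tree
 * itself. It remembers the block touched by the most recent lookup or
 * write, so runs of accesses to neighbouring registers can skip the
 * rb-tree descent entirely; see regcache_rbtree_lookup() below.
 */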

static inline void regcache_rbtree_get_base_top_reg(
	struct regmap *map,
	struct regcache_rbtree_node *rbnode,
	unsigned int *base, unsigned int *top)
{
	*base = rbnode->base_reg;
	*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}
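
/*
 * Worked example (illustrative): with base_reg = 0x10, blklen = 4 and
 * reg_stride = 2 the block covers registers 0x10, 0x12, 0x14 and 0x16,
 * so *top comes out as 0x10 + (4 - 1) * 2 = 0x16.
 */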

static unsigned int regcache_rbtree_get_register(struct regmap *map,
	struct regcache_rbtree_node *rbnode, unsigned int idx)
{
	return regcache_get_val(map, rbnode->block, idx);
}

static void regcache_rbtree_set_register(struct regmap *map,
					 struct regcache_rbtree_node *rbnode,
					 unsigned int idx, unsigned int val)
{
	set_bit(idx, rbnode->cache_present);
	regcache_set_val(map, rbnode->block, idx, val);
}

static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
							    unsigned int reg)
{
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;

	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg)
			return rbnode;
	}

	node = rbtree_ctx->root.rb_node;
	while (node) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			rbtree_ctx->cached_rbnode = rbnode;
			return rbnode;
		} else if (reg > top_reg) {
			node = node->rb_right;
		} else if (reg < base_reg) {
			node = node->rb_left;
		}
	}

	return NULL;
}
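
/*
 * The lookup is a plain binary search keyed on register ranges: blocks
 * never overlap, so comparing reg against [base_reg, top_reg] uniquely
 * determines whether to descend left or right. A hit in the tree walk
 * also refreshes cached_rbnode for the fast path above.
 */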

static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
				  struct regcache_rbtree_node *rbnode)
{
	struct rb_node **new, *parent;
	struct regcache_rbtree_node *rbnode_tmp;
	unsigned int base_reg_tmp, top_reg_tmp;
	unsigned int base_reg;

	parent = NULL;
	new = &root->rb_node;
	while (*new) {
		rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node);
		/* base and top registers of the current rbnode */
		regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
						 &top_reg_tmp);
		/* base register of the rbnode to be added */
		base_reg = rbnode->base_reg;
		parent = *new;
		/* if this register has already been inserted, just return */
		if (base_reg >= base_reg_tmp &&
		    base_reg <= top_reg_tmp)
			return 0;
		else if (base_reg > top_reg_tmp)
			new = &((*new)->rb_right);
		else if (base_reg < base_reg_tmp)
			new = &((*new)->rb_left);
	}

	/* insert the node into the rbtree */
	rb_link_node(&rbnode->node, parent, new);
	rb_insert_color(&rbnode->node, root);

	return 1;
}
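
/*
 * regcache_rbtree_insert() returns 1 when the node was linked and
 * recoloured into the tree, and 0 when an existing block already covers
 * rbnode->base_reg and nothing was inserted.
 */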

#ifdef CONFIG_DEBUG_FS
static int rbtree_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct regcache_rbtree_node *n;
	struct rb_node *node;
	unsigned int base, top;
	size_t mem_size;
	int nodes = 0;
	int registers = 0;
	int this_registers, average;

	map->lock(map->lock_arg);

	mem_size = sizeof(*rbtree_ctx);

	for (node = rb_first(&rbtree_ctx->root); node != NULL;
	     node = rb_next(node)) {
		n = rb_entry(node, struct regcache_rbtree_node, node);
		mem_size += sizeof(*n);
		mem_size += (n->blklen * map->cache_word_size);
		mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);

		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
		this_registers = ((top - base) / map->reg_stride) + 1;
		seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

		nodes++;
		registers += this_registers;
	}

	if (nodes)
		average = registers / nodes;
	else
		average = 0;

	seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
		   nodes, registers, average, mem_size);

	map->unlock(map->lock_arg);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(rbtree);

static void rbtree_debugfs_init(struct regmap *map)
{
	debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#endif
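
/*
 * With debugfs mounted in the usual place, the dump is typically read
 * from /sys/kernel/debug/regmap/<device>/rbtree: one "base-top (count)"
 * line per block, followed by the totals line printed above. The exact
 * parent directory comes from map->debugfs.
 */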

static int regcache_rbtree_init(struct regmap *map)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	int i;
	int ret;

	map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
	if (!map->cache)
		return -ENOMEM;

	rbtree_ctx = map->cache;
	rbtree_ctx->root = RB_ROOT;
	rbtree_ctx->cached_rbnode = NULL;

	for (i = 0; i < map->num_reg_defaults; i++) {
		ret = regcache_rbtree_write(map,
					    map->reg_defaults[i].reg,
					    map->reg_defaults[i].def);
		if (ret)
			goto err;
	}

	return 0;

err:
	regcache_rbtree_exit(map);
	return ret;
}

static int regcache_rbtree_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbtree_node;

	/* if we've already been called then just return */
	rbtree_ctx = map->cache;
	if (!rbtree_ctx)
		return 0;

	/* free up the rbtree */
	next = rb_first(&rbtree_ctx->root);
	while (next) {
		rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
		next = rb_next(&rbtree_node->node);
		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
		kfree(rbtree_node->cache_present);
		kfree(rbtree_node->block);
		kfree(rbtree_node);
	}

	/* release the resources */
	kfree(map->cache);
	map->cache = NULL;

	return 0;
}

static int regcache_rbtree_read(struct regmap *map,
				unsigned int reg, unsigned int *value)
{
	struct regcache_rbtree_node *rbnode;
	unsigned int reg_tmp;

	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		if (!test_bit(reg_tmp, rbnode->cache_present))
			return -ENOENT;
		*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
	} else {
		return -ENOENT;
	}

	return 0;
}
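
/*
 * -ENOENT here means "not cached", not "bad register": a register may
 * sit inside a block's range without ever having been written, which is
 * what the cache_present bitmap distinguishes. The regcache core is then
 * expected to fall back to reading the hardware.
 */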

static int regcache_rbtree_insert_to_block(struct regmap *map,
					   struct regcache_rbtree_node *rbnode,
					   unsigned int base_reg,
					   unsigned int top_reg,
					   unsigned int reg,
					   unsigned int value)
{
	unsigned int blklen;
	unsigned int pos, offset;
	unsigned long *present;
	u8 *blk;

	blklen = (top_reg - base_reg) / map->reg_stride + 1;
	pos = (reg - base_reg) / map->reg_stride;
	offset = (rbnode->base_reg - base_reg) / map->reg_stride;

	blk = krealloc(rbnode->block,
		       blklen * map->cache_word_size,
		       GFP_KERNEL);
	if (!blk)
		return -ENOMEM;

	rbnode->block = blk;

	if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
		present = krealloc(rbnode->cache_present,
				   BITS_TO_LONGS(blklen) * sizeof(*present),
				   GFP_KERNEL);
		if (!present)
			return -ENOMEM;

		memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
		       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
		       * sizeof(*present));
	} else {
		present = rbnode->cache_present;
	}

	/* insert the register value in the correct place in the rbnode block */
	if (pos == 0) {
		memmove(blk + offset * map->cache_word_size,
			blk, rbnode->blklen * map->cache_word_size);
		bitmap_shift_left(present, present, offset, blklen);
	}

	/* update the rbnode block, its size and the base register */
	rbnode->blklen = blklen;
	rbnode->base_reg = base_reg;
	rbnode->cache_present = present;

	regcache_rbtree_set_register(map, rbnode, pos, value);
	return 0;
}
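
/*
 * Illustration: growing a block downwards. Suppose a block holds
 * registers 0x14..0x16 (reg_stride 1, blklen 3) and 0x12 is written.
 * Then base_reg = 0x12, blklen becomes 5, pos = 0 and offset = 2, so
 * the memmove()/bitmap_shift_left() pair slides the old data and its
 * present bits up by two slots before the new value lands at pos 0;
 * the untouched slot for 0x13 simply stays marked as not present.
 */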

static struct regcache_rbtree_node *
regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
{
	struct regcache_rbtree_node *rbnode;
	const struct regmap_range *range;
	int i;

	rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
	if (!rbnode)
		return NULL;

	/* If there is a read table then use it to guess at an allocation */
	if (map->rd_table) {
		for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
			if (regmap_reg_in_range(reg,
						&map->rd_table->yes_ranges[i]))
				break;
		}

		if (i != map->rd_table->n_yes_ranges) {
			range = &map->rd_table->yes_ranges[i];
			rbnode->blklen = (range->range_max - range->range_min) /
				map->reg_stride + 1;
			rbnode->base_reg = range->range_min;
		}
	}

	if (!rbnode->blklen) {
		rbnode->blklen = 1;
		rbnode->base_reg = reg;
	}

	rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
				      GFP_KERNEL);
	if (!rbnode->block)
		goto err_free;

	rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen),
					sizeof(*rbnode->cache_present),
					GFP_KERNEL);
	if (!rbnode->cache_present)
		goto err_free_block;

	return rbnode;

err_free_block:
	kfree(rbnode->block);
err_free:
	kfree(rbnode);
	return NULL;
}
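
/*
 * The rd_table guess above trades memory for fewer reallocations: if the
 * driver declared a readable range containing reg, the whole range is
 * allocated up front on the assumption that its neighbours will be
 * cached sooner or later. Without a table the block starts at a single
 * register and grows via regcache_rbtree_insert_to_block().
 */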

static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
	struct rb_node *node;
	unsigned int reg_tmp;
	int ret;

	rbtree_ctx = map->cache;

	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
	} else {
		unsigned int base_reg, top_reg;
		unsigned int new_base_reg, new_top_reg;
		unsigned int min, max;
		unsigned int max_dist;
		unsigned int dist, best_dist = UINT_MAX;

		max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
			map->cache_word_size;
		if (reg < max_dist)
			min = 0;
		else
			min = reg - max_dist;
		max = reg + max_dist;

		/* look for an adjacent register to the one we are about to add */
		node = rbtree_ctx->root.rb_node;
		while (node) {
			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
					      node);

			regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
							 &base_reg, &top_reg);

			if (base_reg <= max && top_reg >= min) {
				if (reg < base_reg)
					dist = base_reg - reg;
				else if (reg > top_reg)
					dist = reg - top_reg;
				else
					dist = 0;
				if (dist < best_dist) {
					rbnode = rbnode_tmp;
					best_dist = dist;
					new_base_reg = min(reg, base_reg);
					new_top_reg = max(reg, top_reg);
				}
			}

			/*
			 * Keep looking, we want to choose the closest block,
			 * otherwise we might end up creating overlapping
			 * blocks, which breaks the rbtree.
			 */
			if (reg < base_reg)
				node = node->rb_left;
			else if (reg > top_reg)
				node = node->rb_right;
			else
				break;
		}

		if (rbnode) {
			ret = regcache_rbtree_insert_to_block(map, rbnode,
							      new_base_reg,
							      new_top_reg, reg,
							      value);
			if (ret)
				return ret;
			rbtree_ctx->cached_rbnode = rbnode;
			return 0;
		}

		/* We did not manage to find a place to insert it in
		 * an existing block so create a new rbnode.
		 */
		rbnode = regcache_rbtree_node_alloc(map, reg);
		if (!rbnode)
			return -ENOMEM;
		regcache_rbtree_set_register(map, rbnode,
					     reg - rbnode->base_reg, value);
		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
		rbtree_ctx->cached_rbnode = rbnode;
	}

	return 0;
}
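
/*
 * About the max_dist heuristic: a register within
 * reg_stride * sizeof(*rbnode_tmp) / cache_word_size of an existing
 * block is absorbed into it, on the reasoning that padding the block by
 * up to that many words costs no more memory than the struct overhead
 * of a brand-new node. Picking the closest candidate rather than the
 * first one found is what keeps blocks from overlapping (see the
 * comment in the search loop above).
 */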

static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;
	unsigned int start, end;
	int ret;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		ret = regcache_sync_block(map, rbnode->block,
					  rbnode->cache_present,
					  rbnode->base_reg, start, end);
		if (ret != 0)
			return ret;
	}

	return regmap_async_complete(map);
}

static int regcache_rbtree_drop(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode;
	struct rb_node *node;
	unsigned int base_reg, top_reg;
	unsigned int start, end;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		bitmap_clear(rbnode->cache_present, start, end - start);
	}

	return 0;
}

struct regcache_ops regcache_rbtree_ops = {
	.type = REGCACHE_RBTREE,
	.name = "rbtree",
	.init = regcache_rbtree_init,
	.exit = regcache_rbtree_exit,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = rbtree_debugfs_init,
#endif
	.read = regcache_rbtree_read,
	.write = regcache_rbtree_write,
	.sync = regcache_rbtree_sync,
	.drop = regcache_rbtree_drop,
};
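
/*
 * A driver never calls these ops directly; it opts in through its
 * regmap_config and the regcache core dispatches here. A minimal sketch
 * (the foo_* names and register geometry are hypothetical):
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 16,
 *		.max_register = 0xff,
 *		.cache_type = REGCACHE_RBTREE,
 *	};
 *
 * regmap_init_i2c(client, &foo_regmap_config) then returns a regmap
 * whose reads and writes go through the rbtree cache defined above.
 */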