From: Mark Brown
Date: Mon, 10 Dec 2012 16:24:29 +0000 (+0900)
Subject: regmap: debugfs: Cache offsets of valid regions for dump
X-Git-Tag: v5.15~21114^2~4^2
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=5166b7c006eeb4f6becc0822974d8da259484ba1;p=platform%2Fkernel%2Flinux-starfive.git

regmap: debugfs: Cache offsets of valid regions for dump

Avoid doing a linear scan of the entire register map for each read() of
the debugfs register dump by recording the offsets where valid registers
exist when we first read the registers file. This assumes that the set of
valid registers never changes; if that is not the case, invalidation of
the cache will be required.

This could be further improved for large blocks of contiguous registers
by calculating the register we will read from within the block rather
than doing a linear scan of the block as we do now. An rbtree may also
be worthwhile.

Signed-off-by: Mark Brown
---

diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 1abcd27..9c3b0e7 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -15,10 +15,18 @@
 
 #include <linux/regmap.h>
 #include <linux/fs.h>
+#include <linux/list.h>
 
 struct regmap;
 struct regcache_ops;
 
+struct regmap_debugfs_off_cache {
+	struct list_head list;
+	off_t min;
+	off_t max;
+	unsigned int base_reg;
+};
+
 struct regmap_format {
 	size_t buf_size;
 	size_t reg_bytes;
@@ -54,6 +62,8 @@ struct regmap {
 	unsigned int debugfs_reg_len;
 	unsigned int debugfs_val_len;
 	unsigned int debugfs_tot_len;
+
+	struct list_head debugfs_off_cache;
 #endif
 
 	unsigned int max_register;
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 749a1dc..07aad78 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -65,25 +65,53 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
 						  loff_t from,
 						  loff_t *pos)
 {
-	loff_t p = *pos;
-	unsigned int i;
-
-	for (i = base; i <= map->max_register; i += map->reg_stride) {
-		if (!regmap_readable(map, i))
-			continue;
-
-		if (regmap_precious(map, i))
-			continue;
+	struct regmap_debugfs_off_cache *c = NULL;
+	loff_t p = 0;
+	unsigned int i, ret;
+
+	/*
+	 * If we don't have a cache build one so we don't have to do a
+	 * linear scan each time.
+	 */
+	if (list_empty(&map->debugfs_off_cache)) {
+		for (i = base; i <= map->max_register; i += map->reg_stride) {
+			/* Skip unprinted registers, closing off cache entry */
+			if (!regmap_readable(map, i) ||
+			    regmap_precious(map, i)) {
+				if (c) {
+					c->max = p - 1;
+					list_add_tail(&c->list,
+						      &map->debugfs_off_cache);
+					c = NULL;
+				}
+
+				continue;
+			}
+
+			/* No cache entry?  Start a new one */
+			if (!c) {
+				c = kzalloc(sizeof(*c), GFP_KERNEL);
+				if (!c)
+					break;
+				c->min = p;
+				c->base_reg = i;
+			}
+
+			p += map->debugfs_tot_len;
+		}
+	}
 
-		if (i >= from) {
-			*pos = p;
-			return i;
+	/* Find the relevant block */
+	list_for_each_entry(c, &map->debugfs_off_cache, list) {
+		if (*pos >= c->min && *pos <= c->max) {
+			*pos = c->min;
+			return c->base_reg;
 		}
 
-		p += map->debugfs_tot_len;
+		ret = c->max;
 	}
 
-	return base;
+	return ret;
 }
 
 static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
@@ -309,6 +337,8 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
 	struct rb_node *next;
 	struct regmap_range_node *range_node;
 
+	INIT_LIST_HEAD(&map->debugfs_off_cache);
+
 	if (name) {
 		map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
 					      dev_name(map->dev), name);
@@ -357,7 +387,16 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
 
 void regmap_debugfs_exit(struct regmap *map)
 {
+	struct regmap_debugfs_off_cache *c;
+
 	debugfs_remove_recursive(map->debugfs);
+	while (!list_empty(&map->debugfs_off_cache)) {
+		c = list_first_entry(&map->debugfs_off_cache,
+				     struct regmap_debugfs_off_cache,
+				     list);
+		list_del(&c->list);
+		kfree(c);
+	}
 	kfree(map->debugfs_name);
 }
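
As a rough illustration of the "calculate the register we will read from
within the block" improvement suggested in the commit message, the
standalone userspace sketch below computes the target register directly
from a cached block's bounds instead of scanning the block line by line.
It is not kernel code and not part of this patch: the struct mirrors
regmap_debugfs_off_cache, while the helper name reg_for_pos() and the
example numbers are assumptions made up for illustration.

/*
 * Standalone sketch (userspace C, not part of the patch above) of
 * computing the register within a contiguous block from the cached
 * offsets, assuming every printed register occupies tot_len bytes in
 * the dump and registers are reg_stride apart.
 */
#include <stdio.h>

struct off_cache_entry {
	long min;		/* file offset of the first line in the block */
	long max;		/* file offset of the last byte in the block */
	unsigned int base_reg;	/* first register covered by the block */
};

static unsigned int reg_for_pos(const struct off_cache_entry *c, long pos,
				unsigned int tot_len, unsigned int reg_stride)
{
	/* Which printed line within the block does pos fall on? */
	unsigned int line = (pos - c->min) / tot_len;

	return c->base_reg + line * reg_stride;
}

int main(void)
{
	/* A block of readable registers starting at 0x10, 16 bytes per line. */
	struct off_cache_entry c = { .min = 0, .max = 159, .base_reg = 0x10 };

	/* Offset 48 falls on the fourth line, i.e. register 0x13. */
	printf("register 0x%x\n", reg_for_pos(&c, 48, 16, 1));

	return 0;
}

The division replaces the per-line scan inside a block; locating the
right block would still walk the cache list, which is where the rbtree
mentioned in the commit message could help further.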