2019-04-26 02:06:18 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
//
|
|
|
|
// Register map access API
|
|
|
|
//
|
|
|
|
// Copyright 2011 Wolfson Microelectronics plc
|
|
|
|
//
|
|
|
|
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
|
2011-05-12 01:59:58 +08:00
|
|
|
|
2012-03-10 04:17:28 +08:00
|
|
|
#include <linux/device.h>
|
2011-05-12 01:59:58 +08:00
|
|
|
#include <linux/slab.h>
|
2012-02-29 08:28:02 +08:00
|
|
|
#include <linux/export.h>
|
2011-05-12 01:59:58 +08:00
|
|
|
#include <linux/mutex.h>
|
|
|
|
#include <linux/err.h>
|
2020-07-09 00:12:32 +08:00
|
|
|
#include <linux/property.h>
|
2012-06-15 18:23:56 +08:00
|
|
|
#include <linux/rbtree.h>
|
2013-02-03 13:50:14 +08:00
|
|
|
#include <linux/sched.h>
|
2015-07-16 23:36:22 +08:00
|
|
|
#include <linux/delay.h>
|
2016-01-04 18:00:33 +08:00
|
|
|
#include <linux/log2.h>
|
2017-11-01 10:11:55 +08:00
|
|
|
#include <linux/hwspinlock.h>
|
2020-05-31 17:53:00 +08:00
|
|
|
#include <asm/unaligned.h>
|
2011-05-12 01:59:58 +08:00
|
|
|
|
2011-07-25 04:30:55 +08:00
|
|
|
#define CREATE_TRACE_POINTS
|
2015-03-20 05:50:47 +08:00
|
|
|
#include "trace.h"
|
2011-07-25 04:30:55 +08:00
|
|
|
|
2011-07-21 05:35:37 +08:00
|
|
|
#include "internal.h"
|
2011-05-12 01:59:58 +08:00
|
|
|
|
2012-07-06 21:10:23 +08:00
|
|
|
/*
|
|
|
|
* Sometimes for failures during very early init the trace
|
|
|
|
* infrastructure isn't available early enough to be used. For this
|
|
|
|
* sort of problem defining LOG_DEVICE will add printks for basic
|
|
|
|
* register I/O on a specific device.
|
|
|
|
*/
|
|
|
|
#undef LOG_DEVICE
|
|
|
|
|
2018-10-02 18:42:05 +08:00
|
|
|
#ifdef LOG_DEVICE
/*
 * Return true if register I/O on @map should be logged via printk.
 * Only the device whose dev_name() matches the LOG_DEVICE string is
 * logged; map->dev may be NULL for busless maps, so check it first.
 */
static inline bool regmap_should_log(struct regmap *map)
{
	return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
}
#else
/* LOG_DEVICE not defined: logging is compiled out entirely. */
static inline bool regmap_should_log(struct regmap *map) { return false; }
#endif
|
|
|
|
|
|
|
|
|
2012-07-06 21:10:23 +08:00
|
|
|
/* Forward declarations for the low-level accessors defined later in this file. */
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);
|
2013-01-13 04:54:12 +08:00
|
|
|
|
2012-11-20 22:20:30 +08:00
|
|
|
bool regmap_reg_in_ranges(unsigned int reg,
|
|
|
|
const struct regmap_range *ranges,
|
|
|
|
unsigned int nranges)
|
|
|
|
{
|
|
|
|
const struct regmap_range *r;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0, r = ranges; i < nranges; i++, r++)
|
|
|
|
if (regmap_reg_in_range(reg, r))
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
|
|
|
|
|
2013-05-08 20:55:23 +08:00
|
|
|
/*
 * regmap_check_range_table() - decide whether @reg is permitted by an
 * access table.  A match in no_ranges always denies; with no yes_ranges
 * configured everything else is allowed; otherwise @reg must appear in
 * yes_ranges.
 */
bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);
|
2012-11-20 22:20:30 +08:00
|
|
|
|
2011-08-10 16:14:41 +08:00
|
|
|
/*
 * regmap_writeable() - report whether @reg may be written.  Precedence:
 * max_register bound, then the writeable_reg() callback, then the
 * wr_table; with none configured every register is writeable.
 */
bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	/* max_register == 0 means "no limit configured" */
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}
|
|
|
|
|
2016-08-08 23:44:21 +08:00
|
|
|
/*
 * regmap_cached() - report whether a cached value currently exists for
 * @reg.  Returns false when caching is disabled, when @reg is out of
 * range, or when regcache_read() cannot supply a value.  Takes the map
 * lock around the cache probe; the probed value itself is discarded.
 */
bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}
|
|
|
|
|
2011-08-10 16:14:41 +08:00
|
|
|
/*
 * regmap_readable() - report whether @reg may be read.  Maps using a
 * format_write() path are write-only by construction, so they are never
 * readable.  Otherwise the readable_reg() callback, then the rd_table,
 * then a default of true.
 */
bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	/* format_write() maps have no read path at all */
	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}
|
|
|
|
|
|
|
|
bool regmap_volatile(struct regmap *map, unsigned int reg)
|
|
|
|
{
|
2014-08-26 19:12:17 +08:00
|
|
|
if (!map->format.format_write && !regmap_readable(map, reg))
|
2011-08-10 16:14:41 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
if (map->volatile_reg)
|
|
|
|
return map->volatile_reg(map->dev, reg);
|
|
|
|
|
2012-11-20 22:20:30 +08:00
|
|
|
if (map->volatile_table)
|
2013-05-08 20:55:23 +08:00
|
|
|
return regmap_check_range_table(map, reg, map->volatile_table);
|
2012-11-20 22:20:30 +08:00
|
|
|
|
2013-06-04 00:24:08 +08:00
|
|
|
if (map->cache_ops)
|
|
|
|
return false;
|
|
|
|
else
|
|
|
|
return true;
|
2011-08-10 16:14:41 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * regmap_precious() - report whether reading @reg has side effects
 * (e.g. clear-on-read interrupt status) and so must never be read
 * speculatively.  Unreadable registers are trivially not precious;
 * otherwise the precious_reg() callback, then precious_table, then a
 * default of false.
 */
bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}
|
|
|
|
|
2018-10-19 17:33:50 +08:00
|
|
|
/*
 * regmap_writeable_noinc() - report whether @reg supports
 * non-incrementing (FIFO-style) writes.  Callback first, then the
 * wr_noinc_table; default is permissive.
 */
bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->writeable_noinc_reg)
		return map->writeable_noinc_reg(map->dev, reg);

	if (map->wr_noinc_table)
		return regmap_check_range_table(map, reg, map->wr_noinc_table);

	return true;
}
|
|
|
|
|
2018-08-07 22:52:17 +08:00
|
|
|
/*
 * regmap_readable_noinc() - report whether @reg supports
 * non-incrementing (FIFO-style) reads.  Callback first, then the
 * rd_noinc_table; default is permissive.
 */
bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->readable_noinc_reg)
		return map->readable_noinc_reg(map->dev, reg);

	if (map->rd_noinc_table)
		return regmap_check_range_table(map, reg, map->rd_noinc_table);

	return true;
}
|
|
|
|
|
2011-11-09 01:37:25 +08:00
|
|
|
/*
 * regmap_volatile_range() - true only if ALL @num registers starting at
 * @reg are volatile.  regmap_get_offset() scales the index by the
 * map's register stride.
 */
static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
			return false;

	return true;
}
|
|
|
|
|
2012-01-27 23:10:22 +08:00
|
|
|
/*
 * Pack a 2-bit register address and 6-bit value into the single byte
 * at the start of map->work_buf: rrvvvvvv.
 */
static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}
|
|
|
|
|
2011-05-12 01:59:58 +08:00
|
|
|
/*
 * Pack a 4-bit register address and 12-bit value into one big-endian
 * 16-bit word in map->work_buf.
 */
static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}
|
|
|
|
|
|
|
|
/*
 * Pack a 7-bit register address and 9-bit value into one big-endian
 * 16-bit word in map->work_buf.
 */
static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}
|
|
|
|
|
2011-11-16 23:28:21 +08:00
|
|
|
/*
 * Pack a 10-bit register address and 14-bit value into three bytes of
 * map->work_buf, big-endian bit order: the address occupies the top 10
 * bits of the 24-bit frame, the value the bottom 14.
 */
static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}
|
|
|
|
|
2012-03-16 09:11:43 +08:00
|
|
|
static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
|
2011-05-12 01:59:58 +08:00
|
|
|
{
|
|
|
|
u8 *b = buf;
|
|
|
|
|
2012-03-16 09:11:43 +08:00
|
|
|
b[0] = val << shift;
|
2011-05-12 01:59:58 +08:00
|
|
|
}
|
|
|
|
|
2012-05-25 00:47:26 +08:00
|
|
|
/* Store a 16-bit value (pre-shifted by @shift) big-endian into @buf. */
static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be16(val << shift, buf);
}
|
|
|
|
|
2014-04-02 18:09:07 +08:00
|
|
|
/* Store a 16-bit value (pre-shifted by @shift) little-endian into @buf. */
static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le16(val << shift, buf);
}
|
|
|
|
|
2012-05-25 00:47:26 +08:00
|
|
|
/*
 * Store a 16-bit value in CPU byte order into @buf; memcpy avoids an
 * unaligned store on strict-alignment architectures.
 */
static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u16 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}
|
|
|
|
|
2012-03-16 09:11:43 +08:00
|
|
|
static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
|
2012-03-16 09:11:42 +08:00
|
|
|
{
|
|
|
|
u8 *b = buf;
|
|
|
|
|
2012-03-16 09:11:43 +08:00
|
|
|
val <<= shift;
|
|
|
|
|
2012-03-16 09:11:42 +08:00
|
|
|
b[0] = val >> 16;
|
|
|
|
b[1] = val >> 8;
|
|
|
|
b[2] = val;
|
|
|
|
}
|
|
|
|
|
2012-05-25 00:47:26 +08:00
|
|
|
/* Store a 32-bit value (pre-shifted by @shift) big-endian into @buf. */
static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be32(val << shift, buf);
}
|
|
|
|
|
2014-04-02 18:09:07 +08:00
|
|
|
/* Store a 32-bit value (pre-shifted by @shift) little-endian into @buf. */
static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le32(val << shift, buf);
}
|
|
|
|
|
2012-05-25 00:47:26 +08:00
|
|
|
/*
 * Store a 32-bit value in CPU byte order into @buf; memcpy avoids an
 * unaligned store on strict-alignment architectures.
 */
static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u32 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}
|
|
|
|
|
2015-12-03 17:31:52 +08:00
|
|
|
#ifdef CONFIG_64BIT
|
|
|
|
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
|
|
|
|
{
|
2020-05-31 17:53:00 +08:00
|
|
|
put_unaligned_be64((u64) val << shift, buf);
|
2015-12-03 17:31:52 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
|
|
|
|
{
|
2020-05-31 17:53:00 +08:00
|
|
|
put_unaligned_le64((u64) val << shift, buf);
|
2015-12-03 17:31:52 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void regmap_format_64_native(void *buf, unsigned int val,
|
|
|
|
unsigned int shift)
|
|
|
|
{
|
2020-05-31 17:53:00 +08:00
|
|
|
u64 v = (u64) val << shift;
|
|
|
|
|
|
|
|
memcpy(buf, &v, sizeof(v));
|
2015-12-03 17:31:52 +08:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2013-03-04 09:04:51 +08:00
|
|
|
/* In-place parse for formats already in usable order: nothing to do. */
static void regmap_parse_inplace_noop(void *buf)
{
}
|
|
|
|
|
|
|
|
static unsigned int regmap_parse_8(const void *buf)
|
|
|
|
{
|
|
|
|
const u8 *b = buf;
|
2011-05-12 01:59:58 +08:00
|
|
|
|
|
|
|
return b[0];
|
|
|
|
}
|
|
|
|
|
2013-03-04 09:04:51 +08:00
|
|
|
/* Read a big-endian 16-bit value from @buf. */
static unsigned int regmap_parse_16_be(const void *buf)
{
	return get_unaligned_be16(buf);
}
|
|
|
|
|
2014-04-02 18:09:07 +08:00
|
|
|
/* Read a little-endian 16-bit value from @buf. */
static unsigned int regmap_parse_16_le(const void *buf)
{
	return get_unaligned_le16(buf);
}
|
|
|
|
|
2013-03-04 09:04:51 +08:00
|
|
|
/* Convert a big-endian 16-bit value in @buf to CPU order, in place. */
static void regmap_parse_16_be_inplace(void *buf)
{
	u16 v = get_unaligned_be16(buf);

	memcpy(buf, &v, sizeof(v));
}
|
|
|
|
|
2014-04-02 18:09:07 +08:00
|
|
|
/* Convert a little-endian 16-bit value in @buf to CPU order, in place. */
static void regmap_parse_16_le_inplace(void *buf)
{
	u16 v = get_unaligned_le16(buf);

	memcpy(buf, &v, sizeof(v));
}
|
|
|
|
|
2013-03-04 09:04:51 +08:00
|
|
|
/* Read a native-order 16-bit value from @buf (memcpy: alignment-safe). */
static unsigned int regmap_parse_16_native(const void *buf)
{
	u16 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}
|
|
|
|
|
2013-03-04 09:04:51 +08:00
|
|
|
static unsigned int regmap_parse_24(const void *buf)
|
2012-03-16 09:11:42 +08:00
|
|
|
{
|
2013-03-04 09:04:51 +08:00
|
|
|
const u8 *b = buf;
|
2012-03-16 09:11:42 +08:00
|
|
|
unsigned int ret = b[2];
|
|
|
|
ret |= ((unsigned int)b[1]) << 8;
|
|
|
|
ret |= ((unsigned int)b[0]) << 16;
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2013-03-04 09:04:51 +08:00
|
|
|
/* Read a big-endian 32-bit value from @buf. */
static unsigned int regmap_parse_32_be(const void *buf)
{
	return get_unaligned_be32(buf);
}
|
|
|
|
|
2014-04-02 18:09:07 +08:00
|
|
|
/* Read a little-endian 32-bit value from @buf. */
static unsigned int regmap_parse_32_le(const void *buf)
{
	return get_unaligned_le32(buf);
}
|
|
|
|
|
2013-03-04 09:04:51 +08:00
|
|
|
/* Convert a big-endian 32-bit value in @buf to CPU order, in place. */
static void regmap_parse_32_be_inplace(void *buf)
{
	u32 v = get_unaligned_be32(buf);

	memcpy(buf, &v, sizeof(v));
}
|
|
|
|
|
2014-04-02 18:09:07 +08:00
|
|
|
/* Convert a little-endian 32-bit value in @buf to CPU order, in place. */
static void regmap_parse_32_le_inplace(void *buf)
{
	u32 v = get_unaligned_le32(buf);

	memcpy(buf, &v, sizeof(v));
}
|
|
|
|
|
2013-03-04 09:04:51 +08:00
|
|
|
/* Read a native-order 32-bit value from @buf (memcpy: alignment-safe). */
static unsigned int regmap_parse_32_native(const void *buf)
{
	u32 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}
|
|
|
|
|
2015-12-03 17:31:52 +08:00
|
|
|
#ifdef CONFIG_64BIT
|
|
|
|
static unsigned int regmap_parse_64_be(const void *buf)
|
|
|
|
{
|
2020-05-31 17:53:00 +08:00
|
|
|
return get_unaligned_be64(buf);
|
2015-12-03 17:31:52 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned int regmap_parse_64_le(const void *buf)
|
|
|
|
{
|
2020-05-31 17:53:00 +08:00
|
|
|
return get_unaligned_le64(buf);
|
2015-12-03 17:31:52 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void regmap_parse_64_be_inplace(void *buf)
|
|
|
|
{
|
2020-05-31 17:53:00 +08:00
|
|
|
u64 v = get_unaligned_be64(buf);
|
2015-12-03 17:31:52 +08:00
|
|
|
|
2020-05-31 17:53:00 +08:00
|
|
|
memcpy(buf, &v, sizeof(v));
|
2015-12-03 17:31:52 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void regmap_parse_64_le_inplace(void *buf)
|
|
|
|
{
|
2020-05-31 17:53:00 +08:00
|
|
|
u64 v = get_unaligned_le64(buf);
|
2015-12-03 17:31:52 +08:00
|
|
|
|
2020-05-31 17:53:00 +08:00
|
|
|
memcpy(buf, &v, sizeof(v));
|
2015-12-03 17:31:52 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned int regmap_parse_64_native(const void *buf)
|
|
|
|
{
|
2020-05-31 17:53:00 +08:00
|
|
|
u64 v;
|
|
|
|
|
|
|
|
memcpy(&v, buf, sizeof(v));
|
|
|
|
return v;
|
2015-12-03 17:31:52 +08:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2017-11-01 10:11:55 +08:00
|
|
|
/*
 * map->lock callback backed by a hardware spinlock, no IRQ handling.
 * UINT_MAX timeout effectively waits forever; the return value is
 * ignored to fit the void map->lock signature.
 */
static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}
|
|
|
|
|
|
|
|
/* Hardware-spinlock map->lock callback that also disables local IRQs. */
static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}
|
|
|
|
|
|
|
|
/*
 * Hardware-spinlock map->lock callback that saves the IRQ state into
 * map->spinlock_flags for the matching irqrestore unlock.
 */
static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &map->spinlock_flags);
}
|
|
|
|
|
|
|
|
/* Counterpart of regmap_lock_hwlock(). */
static void regmap_unlock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
}
|
|
|
|
|
|
|
|
/* Counterpart of regmap_lock_hwlock_irq(): re-enables local IRQs. */
static void regmap_unlock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
}
|
|
|
|
|
|
|
|
/* Counterpart of regmap_lock_hwlock_irqsave(): restores saved IRQ state. */
static void regmap_unlock_hwlock_irqrestore(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}
|
|
|
|
|
2017-12-13 17:28:10 +08:00
|
|
|
/*
 * No-op lock/unlock callback used when config->disable_locking is set;
 * the caller then guarantees external serialisation.
 */
static void regmap_lock_unlock_none(void *__map)
{

}
|
2017-11-01 10:11:55 +08:00
|
|
|
|
2012-10-16 21:56:59 +08:00
|
|
|
/* Default map->lock callback for sleepable buses: take map->mutex. */
static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}
|
|
|
|
|
2012-10-16 21:56:59 +08:00
|
|
|
/* Counterpart of regmap_lock_mutex(). */
static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}
|
|
|
|
|
2012-10-16 21:56:59 +08:00
|
|
|
/*
 * map->lock callback for fast_io maps: spinlock with IRQs saved into
 * map->spinlock_flags (safe because the lock itself serialises access
 * to the flags field).  __acquires() is sparse annotation only.
 */
static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}
|
|
|
|
|
2012-10-16 21:56:59 +08:00
|
|
|
/* Counterpart of regmap_lock_spinlock(): restores saved IRQ flags. */
static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}
|
|
|
|
|
2012-05-09 00:44:40 +08:00
|
|
|
static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}
|
|
|
|
|
2012-06-15 18:23:56 +08:00
|
|
|
/*
 * Insert an indirect-access range into the map's rbtree, keyed by the
 * [range_min, range_max] interval.  Returns false if the new range
 * overlaps an existing one (nothing is inserted in that case).
 */
static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Standard rbtree insertion walk: descend to a NULL child slot */
	while (*new) {
		struct regmap_range_node *this =
			rb_entry(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;	/* intervals overlap */
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}
|
|
|
|
|
|
|
|
/*
 * Find the indirect-access range containing @reg, or NULL if @reg is
 * not covered by any registered range.  Ranges are non-overlapping
 * (enforced by _regmap_range_add), so the binary search is exact.
 */
static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}
|
|
|
|
|
|
|
|
/*
 * Tear down all indirect-access ranges: erase and free every rbtree
 * node (fetching the successor before erasing), then free the shared
 * page-selector scratch buffer.
 */
static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}
|
|
|
|
|
2014-02-10 23:22:33 +08:00
|
|
|
/*
 * regmap_attach_dev() - bind @map to @dev after a busless/deferred
 * init: sets map->dev, creates the debugfs entries, and registers a
 * devres record so dev_get_regmap() can find the map later.
 * Returns 0 or -ENOMEM (debugfs is rolled back on failure).
 */
int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;

	map->dev = dev;

	regmap_debugfs_init(map, config->name);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);
|
|
|
|
|
2014-08-27 22:36:03 +08:00
|
|
|
/*
 * Resolve the register-address endianness: regmap config wins, then
 * the bus default, then REGMAP_ENDIAN_BIG.  Never returns
 * REGMAP_ENDIAN_DEFAULT.
 */
static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
					const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
|
|
|
|
|
2015-02-04 02:01:18 +08:00
|
|
|
/*
 * Resolve the register-value endianness with the precedence:
 * regmap config, then firmware node properties ("big-endian",
 * "little-endian", "native-endian"), then the bus default, and
 * finally REGMAP_ENDIAN_BIG.  Never returns REGMAP_ENDIAN_DEFAULT.
 */
enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the firmware node exist try to get endianness from it */
	if (fwnode_property_read_bool(fwnode, "big-endian"))
		endian = REGMAP_ENDIAN_BIG;
	else if (fwnode_property_read_bool(fwnode, "little-endian"))
		endian = REGMAP_ENDIAN_LITTLE;
	else if (fwnode_property_read_bool(fwnode, "native-endian"))
		endian = REGMAP_ENDIAN_NATIVE;

	/* If the endianness was specified in fwnode, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
|
2014-07-15 12:23:02 +08:00
|
|
|
|
2015-07-08 14:30:18 +08:00
|
|
|
/*
 * Allocate and initialise a register map.
 *
 * Callers normally reach this through the regmap_init()/devm_regmap_init()
 * wrapper macros, which supply @lock_key/@lock_name for lockdep.
 *
 * On success returns the new map; on failure returns an ERR_PTR() and
 * everything allocated so far is unwound via the err_* label chain at the
 * bottom (each label frees what was acquired before the jump to it).
 */
struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	/* Duplicate the name so the caller's config may be freed. */
	if (config->name) {
		map->name = kstrdup_const(config->name, GFP_KERNEL);
		if (!map->name) {
			ret = -ENOMEM;
			goto err_map;
		}
	}

	/*
	 * Locking strategy, in order of precedence: none at all, caller
	 * supplied callbacks, a hardware spinlock, or (default) a spinlock
	 * for fast_io buses and a mutex otherwise.
	 */
	if (config->disable_locking) {
		map->lock = map->unlock = regmap_lock_unlock_none;
		map->can_sleep = config->can_sleep;
		regmap_debugfs_disable(map);
	} else if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
		map->can_sleep = config->can_sleep;
	} else if (config->use_hwlock) {
		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
		if (!map->hwlock) {
			ret = -ENXIO;
			goto err_name;
		}

		switch (config->hwlock_mode) {
		case HWLOCK_IRQSTATE:
			map->lock = regmap_lock_hwlock_irqsave;
			map->unlock = regmap_unlock_hwlock_irqrestore;
			break;
		case HWLOCK_IRQ:
			map->lock = regmap_lock_hwlock_irq;
			map->unlock = regmap_unlock_hwlock_irq;
			break;
		default:
			map->lock = regmap_lock_hwlock;
			map->unlock = regmap_unlock_hwlock;
			break;
		}

		map->lock_arg = map;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			map->can_sleep = true;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	/* Derive the on-the-wire layout from the bit widths in the config. */
	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	/* Cache log2 of the stride so hot paths can shift, not divide. */
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_read || !bus || !bus->read;
	map->use_single_write = config->use_single_write || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->wr_noinc_table = config->wr_noinc_table;
	map->rd_noinc_table = config->rd_noinc_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	/* Config-level flag masks take precedence over the bus default. */
	if (config->read_flag_mask ||
	    config->write_flag_mask ||
	    config->zero_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	/*
	 * No bus, or a register-based (reg_read/reg_write) bus, does its own
	 * marshalling, so no wire format needs to be set up for those.
	 */
	if (!bus) {
		map->reg_read  = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;
		map->reg_update_bits = bus->reg_update_bits;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read  = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
	}

	reg_endian = regmap_get_reg_endian(bus, config);
	val_endian = regmap_get_val_endian(dev, bus, config);

	/*
	 * Pick a register formatter.  The odd sub-byte widths (2/4/7/10 bit
	 * registers) only exist paired with a specific value width and use a
	 * combined format_write; the byte-multiple widths set format_reg.
	 */
	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 24:
		/* 24-bit registers are only supported big-endian. */
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

#ifdef CONFIG_64BIT
	case 64:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_64_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_64_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif

	default:
		goto err_hwlock;
	}

	/* Native-endian values need no byte swapping when parsed in place. */
	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	/* Pick a value formatter/parser for the configured width. */
	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 24:
		/* 24-bit values are only supported big-endian. */
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#ifdef CONFIG_64BIT
	case 64:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_64_be;
			map->format.parse_val = regmap_parse_64_be;
			map->format.parse_inplace = regmap_parse_64_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_64_le;
			map->format.parse_val = regmap_parse_64_le;
			map->format.parse_inplace = regmap_parse_64_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_64_native;
			map->format.parse_val = regmap_parse_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif
	}

	/* Combined reg+val formatters imply big-endian and single writes. */
	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_hwlock;
		map->use_single_write = true;
	}

	/* Fail if no usable formatting combination was established. */
	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_hwlock;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_hwlock;
	}

	/*
	 * Raw (reg+val buffer) writes can be cached before hitting the bus;
	 * combined formatted writes cannot.
	 */
	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	/* Validate and register each configured paged/windowed range. */
	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/* Make sure, that this register range has no selector
		   or data window within its boundary */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		/* One shared scratch buffer for all page-selector updates. */
		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	/* With a device, debugfs setup happens via regmap_attach_dev(). */
	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	} else {
		regmap_debugfs_init(map, config->name);
	}

	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_hwlock:
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
err_name:
	kfree_const(map->name);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);
|
2011-05-12 01:59:58 +08:00
|
|
|
|
2012-01-31 03:56:52 +08:00
|
|
|
/* devres destructor: tear down the regmap stored in the devres blob. */
static void devm_regmap_release(struct device *dev, void *res)
{
	struct regmap **map = res;

	regmap_exit(*map);
}
|
|
|
|
|
2015-07-08 14:30:18 +08:00
|
|
|
struct regmap *__devm_regmap_init(struct device *dev,
|
|
|
|
const struct regmap_bus *bus,
|
|
|
|
void *bus_context,
|
|
|
|
const struct regmap_config *config,
|
|
|
|
struct lock_class_key *lock_key,
|
|
|
|
const char *lock_name)
|
2012-01-31 03:56:52 +08:00
|
|
|
{
|
|
|
|
struct regmap **ptr, *regmap;
|
|
|
|
|
|
|
|
ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
|
|
|
|
if (!ptr)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
2015-07-08 14:30:18 +08:00
|
|
|
regmap = __regmap_init(dev, bus, bus_context, config,
|
|
|
|
lock_key, lock_name);
|
2012-01-31 03:56:52 +08:00
|
|
|
if (!IS_ERR(regmap)) {
|
|
|
|
*ptr = regmap;
|
|
|
|
devres_add(dev, ptr);
|
|
|
|
} else {
|
|
|
|
devres_free(ptr);
|
|
|
|
}
|
|
|
|
|
|
|
|
return regmap;
|
|
|
|
}
|
2015-07-08 14:30:18 +08:00
|
|
|
EXPORT_SYMBOL_GPL(__devm_regmap_init);
|
2012-01-31 03:56:52 +08:00
|
|
|
|
2013-06-11 20:18:15 +08:00
|
|
|
static void regmap_field_init(struct regmap_field *rm_field,
|
|
|
|
struct regmap *regmap, struct reg_field reg_field)
|
|
|
|
{
|
|
|
|
rm_field->regmap = regmap;
|
|
|
|
rm_field->reg = reg_field.reg;
|
|
|
|
rm_field->shift = reg_field.lsb;
|
2015-06-16 19:53:19 +08:00
|
|
|
rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
|
2013-09-02 11:30:50 +08:00
|
|
|
rm_field->id_size = reg_field.id_size;
|
|
|
|
rm_field->id_offset = reg_field.id_offset;
|
2013-06-11 20:18:15 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2017-01-12 19:17:39 +08:00
|
|
|
* devm_regmap_field_alloc() - Allocate and initialise a register field.
|
2013-06-11 20:18:15 +08:00
|
|
|
*
|
|
|
|
* @dev: Device that will be interacted with
|
|
|
|
* @regmap: regmap bank in which this register field is located.
|
|
|
|
* @reg_field: Register field with in the bank.
|
|
|
|
*
|
|
|
|
* The return value will be an ERR_PTR() on error or a valid pointer
|
|
|
|
* to a struct regmap_field. The regmap_field will be automatically freed
|
|
|
|
* by the device management code.
|
|
|
|
*/
|
|
|
|
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
|
|
|
|
struct regmap *regmap, struct reg_field reg_field)
|
|
|
|
{
|
|
|
|
struct regmap_field *rm_field = devm_kzalloc(dev,
|
|
|
|
sizeof(*rm_field), GFP_KERNEL);
|
|
|
|
if (!rm_field)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
|
|
|
regmap_field_init(rm_field, regmap, reg_field);
|
|
|
|
|
|
|
|
return rm_field;
|
|
|
|
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
|
|
|
|
|
|
|
|
/**
 * devm_regmap_field_free() - Free a register field allocated using
 *                            devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free register field allocated using devm_regmap_field_alloc(). Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per device-driver life-cyle.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	/* devm_kzalloc()'d in devm_regmap_field_alloc(), so devm_kfree(). */
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);
|
|
|
|
|
|
|
|
/**
|
2017-01-12 19:17:39 +08:00
|
|
|
* regmap_field_alloc() - Allocate and initialise a register field.
|
2013-06-11 20:18:15 +08:00
|
|
|
*
|
|
|
|
* @regmap: regmap bank in which this register field is located.
|
|
|
|
* @reg_field: Register field with in the bank.
|
|
|
|
*
|
|
|
|
* The return value will be an ERR_PTR() on error or a valid pointer
|
|
|
|
* to a struct regmap_field. The regmap_field should be freed by the
|
|
|
|
* user once its finished working with it using regmap_field_free().
|
|
|
|
*/
|
|
|
|
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
|
|
|
|
struct reg_field reg_field)
|
|
|
|
{
|
|
|
|
struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
|
|
|
|
|
|
|
|
if (!rm_field)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
|
|
|
regmap_field_init(rm_field, regmap, reg_field);
|
|
|
|
|
|
|
|
return rm_field;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(regmap_field_alloc);
|
|
|
|
|
|
|
|
/**
 * regmap_field_free() - Free register field allocated using
 *                       regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);
|
|
|
|
|
2011-12-04 01:06:20 +08:00
|
|
|
/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration. Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache. This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 *
 * Return: 0 on success, or the error from regcache_init().
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	/* Tear down the old cache and debugfs view before re-configuring. */
	regcache_exit(map);
	regmap_debugfs_exit(map);

	/* Take over the access/cacheability callbacks from the new config. */
	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	regmap_debugfs_init(map, config->name);

	/* Reset cache mode flags so the new cache starts in normal mode. */
	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);
|
2011-12-04 01:06:20 +08:00
|
|
|
|
2011-05-12 01:59:58 +08:00
|
|
|
/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	/* Unwind in roughly reverse order of __regmap_init(). */
	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	/* Let the bus release whatever context it handed us at init. */
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	/* Drain the pool of completed-and-recycled async transactions. */
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
	/* Name was kstrdup_const()'d, so release with kfree_const(). */
	kfree_const(map->name);
	kfree(map->patch);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
|
|
|
|
|
2012-05-09 00:44:40 +08:00
|
|
|
static int dev_get_regmap_match(struct device *dev, void *res, void *data)
|
|
|
|
{
|
|
|
|
struct regmap **r = res;
|
|
|
|
if (!r || !*r) {
|
|
|
|
WARN_ON(!r || !*r);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If the user didn't specify a name match any */
|
|
|
|
if (data)
|
2020-07-03 18:33:15 +08:00
|
|
|
return !strcmp((*r)->name, data);
|
2012-05-09 00:44:40 +08:00
|
|
|
else
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2017-01-12 19:17:39 +08:00
|
|
|
* dev_get_regmap() - Obtain the regmap (if any) for a device
|
2012-05-09 00:44:40 +08:00
|
|
|
*
|
|
|
|
* @dev: Device to retrieve the map for
|
|
|
|
* @name: Optional name for the register map, usually NULL.
|
|
|
|
*
|
|
|
|
* Returns the regmap for the device if one is present, or NULL. If
|
|
|
|
* name is specified then it must match the name specified when
|
|
|
|
* registering the device, if it is NULL then the first regmap found
|
|
|
|
* will be used. Devices with multiple register maps are very rare,
|
|
|
|
* generic code should normally not need to specify a name.
|
|
|
|
*/
|
|
|
|
struct regmap *dev_get_regmap(struct device *dev, const char *name)
|
|
|
|
{
|
|
|
|
struct regmap **r = devres_find(dev, dev_get_regmap_release,
|
|
|
|
dev_get_regmap_match, (void *)name);
|
|
|
|
|
|
|
|
if (!r)
|
|
|
|
return NULL;
|
|
|
|
return *r;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(dev_get_regmap);
|
|
|
|
|
2014-07-21 23:38:47 +08:00
|
|
|
/**
|
2017-01-12 19:17:39 +08:00
|
|
|
* regmap_get_device() - Obtain the device from a regmap
|
2014-07-21 23:38:47 +08:00
|
|
|
*
|
|
|
|
* @map: Register map to operate on.
|
|
|
|
*
|
|
|
|
* Returns the underlying device that the regmap has been created for.
|
|
|
|
*/
|
|
|
|
struct device *regmap_get_device(struct regmap *map)
|
|
|
|
{
|
|
|
|
return map->dev;
|
|
|
|
}
|
2014-07-26 01:30:31 +08:00
|
|
|
EXPORT_SYMBOL_GPL(regmap_get_device);
|
2014-07-21 23:38:47 +08:00
|
|
|
|
2012-06-15 18:23:56 +08:00
|
|
|
/*
 * Switch a paged/windowed range to the page containing *reg and rewrite
 * *reg to the corresponding address inside the data window.
 *
 * @map: map being accessed (its work_buf is temporarily swapped out)
 * @reg: in: virtual register; out: translated window register
 * @range: the range node *reg falls into
 * @val_num: number of registers in the bulk access (1 for single access)
 *
 * Returns 0 on success, -EINVAL if a bulk access would cross a range or
 * page boundary, or the error from updating the selector register.
 */
static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	/* Split the offset within the range into page number + offset. */
	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have selector register inside data window.
	   In that case, selector register is located on every page and
	   it needs no page switching, when accessed alone. */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		/* Restore the caller's work_buf before reporting errors. */
		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	/* Hand back the physical register inside the data window. */
	*reg = range->window_start + win_offset;

	return 0;
}
|
|
|
|
|
2016-09-16 04:56:10 +08:00
|
|
|
static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
|
|
|
|
unsigned long mask)
|
|
|
|
{
|
|
|
|
u8 *buf;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (!mask || !map->work_buf)
|
|
|
|
return;
|
|
|
|
|
|
|
|
buf = map->work_buf;
|
|
|
|
|
|
|
|
for (i = 0; i < max_bytes; i++)
|
|
|
|
buf[i] |= (mask >> (8 * i)) & 0xff;
|
|
|
|
}
|
|
|
|
|
2018-02-22 20:59:12 +08:00
|
|
|
static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
|
|
|
|
const void *val, size_t val_len)
|
2011-05-12 01:59:58 +08:00
|
|
|
{
|
2012-10-05 00:31:11 +08:00
|
|
|
struct regmap_range_node *range;
|
2013-01-27 22:07:38 +08:00
|
|
|
unsigned long flags;
|
|
|
|
void *work_val = map->work_buf + map->format.reg_bytes +
|
|
|
|
map->format.pad_bytes;
|
2011-05-12 01:59:58 +08:00
|
|
|
void *buf;
|
|
|
|
int ret = -ENOTSUPP;
|
|
|
|
size_t len;
|
2011-07-24 18:46:20 +08:00
|
|
|
int i;
|
|
|
|
|
2013-03-14 03:18:13 +08:00
|
|
|
WARN_ON(!map->bus);
|
2013-01-28 02:49:05 +08:00
|
|
|
|
2020-01-19 04:56:24 +08:00
|
|
|
/* Check for unwritable or noinc registers in range
|
|
|
|
* before we start
|
|
|
|
*/
|
|
|
|
if (!regmap_writeable_noinc(map, reg)) {
|
|
|
|
for (i = 0; i < val_len / map->format.val_bytes; i++) {
|
|
|
|
unsigned int element =
|
|
|
|
reg + regmap_get_offset(map, i);
|
|
|
|
if (!regmap_writeable(map, element) ||
|
|
|
|
regmap_writeable_noinc(map, element))
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
}
|
2011-05-12 01:59:58 +08:00
|
|
|
|
2012-02-11 00:00:27 +08:00
|
|
|
if (!map->cache_bypass && map->format.parse_val) {
|
|
|
|
unsigned int ival;
|
|
|
|
int val_bytes = map->format.val_bytes;
|
|
|
|
for (i = 0; i < val_len / val_bytes; i++) {
|
regmap: don't corrupt work buffer in _regmap_raw_write()
_regmap_raw_write() contains code to call regcache_write() to write
values to the cache. That code calls memcpy() to copy the value data to
the start of the work_buf. However, at least when _regmap_raw_write() is
called from _regmap_bus_raw_write(), the value data is in the work_buf,
and this memcpy() operation may over-write part of that value data,
depending on the value of reg_bytes + pad_bytes. At least when using
reg_bytes==1 and pad_bytes==0, corruption of the value data does occur.
To solve this, remove the memcpy() operation, and modify the subsequent
.parse_val() call to parse the original value buffer directly.
At least in the case of 8-bit register address and 16-bit values, and
writes of single registers at a time, this memcpy-then-parse combination
used to cancel each-other out; for a work-buffer containing xx 89 03,
the memcpy changed it to 89 03 03, and the parse_val changed it back to
89 89 03, thus leaving the value uncorrupted. This appears completely
accidental though. Since commit 8a819ff "regmap: core: Split out in
place value parsing", .parse_val only returns the parsed value, and does
not modify the buffer, and hence does not (accidentally) undo the
corruption caused by memcpy(). This caused bogus values to get written
to HW, thus preventing e.g. audio playback on systems with a WM8903
CODEC. This patch fixes that.
Signed-off-by: Stephen Warren <swarren@nvidia.com>
Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
2013-03-21 07:02:02 +08:00
|
|
|
ival = map->format.parse_val(val + (i * val_bytes));
|
2016-01-04 18:00:33 +08:00
|
|
|
ret = regcache_write(map,
|
|
|
|
reg + regmap_get_offset(map, i),
|
2012-04-10 03:40:24 +08:00
|
|
|
ival);
|
2012-02-11 00:00:27 +08:00
|
|
|
if (ret) {
|
|
|
|
dev_err(map->dev,
|
2012-10-27 02:05:32 +08:00
|
|
|
"Error in caching of register: %x ret: %d\n",
|
2012-02-11 00:00:27 +08:00
|
|
|
reg + i, ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (map->cache_only) {
|
|
|
|
map->cache_dirty = true;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-10-05 00:31:11 +08:00
|
|
|
range = _regmap_range_lookup(map, reg);
|
|
|
|
if (range) {
|
2012-10-05 01:20:18 +08:00
|
|
|
int val_num = val_len / map->format.val_bytes;
|
|
|
|
int win_offset = (reg - range->range_min) % range->window_len;
|
|
|
|
int win_residue = range->window_len - win_offset;
|
|
|
|
|
|
|
|
/* If the write goes beyond the end of the window split it */
|
|
|
|
while (val_num > win_residue) {
|
2012-10-26 00:07:18 +08:00
|
|
|
dev_dbg(map->dev, "Writing window %d/%zu\n",
|
2012-10-05 01:20:18 +08:00
|
|
|
win_residue, val_len / map->format.val_bytes);
|
2018-02-22 20:59:12 +08:00
|
|
|
ret = _regmap_raw_write_impl(map, reg, val,
|
|
|
|
win_residue *
|
|
|
|
map->format.val_bytes);
|
2012-10-05 01:20:18 +08:00
|
|
|
if (ret != 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
reg += win_residue;
|
|
|
|
val_num -= win_residue;
|
|
|
|
val += win_residue * map->format.val_bytes;
|
|
|
|
val_len -= win_residue * map->format.val_bytes;
|
|
|
|
|
|
|
|
win_offset = (reg - range->range_min) %
|
|
|
|
range->window_len;
|
|
|
|
win_residue = range->window_len - win_offset;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = _regmap_select_page(map, ®, range, val_num);
|
2012-10-05 00:39:13 +08:00
|
|
|
if (ret != 0)
|
2012-10-05 00:31:11 +08:00
|
|
|
return ret;
|
|
|
|
}
|
2012-06-15 18:23:56 +08:00
|
|
|
|
2012-03-16 09:11:43 +08:00
|
|
|
map->format.format_reg(map->work_buf, reg, map->reg_shift);
|
2016-09-16 04:56:10 +08:00
|
|
|
regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
|
|
|
|
map->write_flag_mask);
|
2011-09-06 02:46:32 +08:00
|
|
|
|
2013-10-09 01:37:36 +08:00
|
|
|
/*
|
|
|
|
* Essentially all I/O mechanisms will be faster with a single
|
|
|
|
* buffer to write. Since register syncs often generate raw
|
|
|
|
* writes of single registers optimise that case.
|
|
|
|
*/
|
|
|
|
if (val != work_val && val_len == map->format.val_bytes) {
|
|
|
|
memcpy(work_val, val, map->format.val_bytes);
|
|
|
|
val = work_val;
|
|
|
|
}
|
|
|
|
|
2013-10-09 19:28:52 +08:00
|
|
|
if (map->async && map->bus->async_write) {
|
2013-10-08 06:00:24 +08:00
|
|
|
struct regmap_async *async;
|
2013-01-27 22:07:38 +08:00
|
|
|
|
2015-03-09 19:20:13 +08:00
|
|
|
trace_regmap_async_write_start(map, reg, val_len);
|
2013-02-22 03:05:48 +08:00
|
|
|
|
2013-10-08 06:00:24 +08:00
|
|
|
spin_lock_irqsave(&map->async_lock, flags);
|
|
|
|
async = list_first_entry_or_null(&map->async_free,
|
|
|
|
struct regmap_async,
|
|
|
|
list);
|
|
|
|
if (async)
|
|
|
|
list_del(&async->list);
|
|
|
|
spin_unlock_irqrestore(&map->async_lock, flags);
|
|
|
|
|
|
|
|
if (!async) {
|
|
|
|
async = map->bus->async_alloc();
|
|
|
|
if (!async)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
async->work_buf = kzalloc(map->format.buf_size,
|
|
|
|
GFP_KERNEL | GFP_DMA);
|
|
|
|
if (!async->work_buf) {
|
|
|
|
kfree(async);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2013-01-27 22:07:38 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
async->map = map;
|
|
|
|
|
|
|
|
/* If the caller supplied the value we can use it safely. */
|
|
|
|
memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
|
|
|
|
map->format.reg_bytes + map->format.val_bytes);
|
|
|
|
|
|
|
|
spin_lock_irqsave(&map->async_lock, flags);
|
|
|
|
list_add_tail(&async->list, &map->async_list);
|
|
|
|
spin_unlock_irqrestore(&map->async_lock, flags);
|
|
|
|
|
2013-10-11 05:38:29 +08:00
|
|
|
if (val != work_val)
|
|
|
|
ret = map->bus->async_write(map->bus_context,
|
|
|
|
async->work_buf,
|
|
|
|
map->format.reg_bytes +
|
|
|
|
map->format.pad_bytes,
|
|
|
|
val, val_len, async);
|
|
|
|
else
|
|
|
|
ret = map->bus->async_write(map->bus_context,
|
|
|
|
async->work_buf,
|
|
|
|
map->format.reg_bytes +
|
|
|
|
map->format.pad_bytes +
|
|
|
|
val_len, NULL, 0, async);
|
2013-01-27 22:07:38 +08:00
|
|
|
|
|
|
|
if (ret != 0) {
|
|
|
|
dev_err(map->dev, "Failed to schedule write: %d\n",
|
|
|
|
ret);
|
|
|
|
|
|
|
|
spin_lock_irqsave(&map->async_lock, flags);
|
2013-10-08 06:00:24 +08:00
|
|
|
list_move(&async->list, &map->async_free);
|
2013-01-27 22:07:38 +08:00
|
|
|
spin_unlock_irqrestore(&map->async_lock, flags);
|
|
|
|
}
|
2013-03-27 21:08:44 +08:00
|
|
|
|
|
|
|
return ret;
|
2013-01-27 22:07:38 +08:00
|
|
|
}
|
|
|
|
|
2015-03-09 19:20:13 +08:00
|
|
|
trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
|
2011-07-25 04:30:55 +08:00
|
|
|
|
2011-07-21 04:47:22 +08:00
|
|
|
/* If we're doing a single register write we can probably just
|
|
|
|
* send the work_buf directly, otherwise try to do a gather
|
|
|
|
* write.
|
|
|
|
*/
|
2013-01-27 22:07:38 +08:00
|
|
|
if (val == work_val)
|
2012-04-05 05:48:30 +08:00
|
|
|
ret = map->bus->write(map->bus_context, map->work_buf,
|
2012-01-18 18:52:25 +08:00
|
|
|
map->format.reg_bytes +
|
|
|
|
map->format.pad_bytes +
|
|
|
|
val_len);
|
2011-07-21 04:47:22 +08:00
|
|
|
else if (map->bus->gather_write)
|
2012-04-05 05:48:30 +08:00
|
|
|
ret = map->bus->gather_write(map->bus_context, map->work_buf,
|
2012-01-18 18:52:25 +08:00
|
|
|
map->format.reg_bytes +
|
|
|
|
map->format.pad_bytes,
|
2011-05-12 01:59:58 +08:00
|
|
|
val, val_len);
|
2019-06-12 19:03:43 +08:00
|
|
|
else
|
|
|
|
ret = -ENOTSUPP;
|
2011-05-12 01:59:58 +08:00
|
|
|
|
2011-07-21 04:47:22 +08:00
|
|
|
/* If that didn't work fall back on linearising by hand. */
|
2011-05-12 01:59:58 +08:00
|
|
|
if (ret == -ENOTSUPP) {
|
2012-01-18 18:52:25 +08:00
|
|
|
len = map->format.reg_bytes + map->format.pad_bytes + val_len;
|
|
|
|
buf = kzalloc(len, GFP_KERNEL);
|
2011-05-12 01:59:58 +08:00
|
|
|
if (!buf)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
memcpy(buf, map->work_buf, map->format.reg_bytes);
|
2012-01-18 18:52:25 +08:00
|
|
|
memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
|
|
|
|
val, val_len);
|
2012-04-05 05:48:30 +08:00
|
|
|
ret = map->bus->write(map->bus_context, buf, len);
|
2011-05-12 01:59:58 +08:00
|
|
|
|
|
|
|
kfree(buf);
|
2016-08-18 17:01:55 +08:00
|
|
|
} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
|
2016-09-22 17:02:25 +08:00
|
|
|
/* regcache_drop_region() takes lock that we already have,
|
|
|
|
* thus call map->cache_ops->drop() directly
|
|
|
|
*/
|
|
|
|
if (map->cache_ops && map->cache_ops->drop)
|
|
|
|
map->cache_ops->drop(map, reg, reg + 1);
|
2011-05-12 01:59:58 +08:00
|
|
|
}
|
|
|
|
|
2015-03-09 19:20:13 +08:00
|
|
|
trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
|
2011-07-25 04:30:55 +08:00
|
|
|
|
2011-05-12 01:59:58 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2013-03-27 05:24:20 +08:00
|
|
|
/**
|
|
|
|
* regmap_can_raw_write - Test if regmap_raw_write() is supported
|
|
|
|
*
|
|
|
|
* @map: Map to check.
|
|
|
|
*/
|
|
|
|
bool regmap_can_raw_write(struct regmap *map)
|
|
|
|
{
|
2015-08-12 18:12:33 +08:00
|
|
|
return map->bus && map->bus->write && map->format.format_val &&
|
|
|
|
map->format.format_reg;
|
2013-03-27 05:24:20 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(regmap_can_raw_write);
|
|
|
|
|
2015-08-30 15:33:54 +08:00
|
|
|
/**
|
|
|
|
* regmap_get_raw_read_max - Get the maximum size we can read
|
|
|
|
*
|
|
|
|
* @map: Map to check.
|
|
|
|
*/
|
|
|
|
size_t regmap_get_raw_read_max(struct regmap *map)
|
|
|
|
{
|
|
|
|
return map->max_raw_read;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* regmap_get_raw_write_max - Get the maximum size we can read
|
|
|
|
*
|
|
|
|
* @map: Map to check.
|
|
|
|
*/
|
|
|
|
size_t regmap_get_raw_write_max(struct regmap *map)
|
|
|
|
{
|
|
|
|
return map->max_raw_write;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
|
|
|
|
|
2013-01-13 04:54:13 +08:00
|
|
|
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
|
|
|
|
unsigned int val)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct regmap_range_node *range;
|
|
|
|
struct regmap *map = context;
|
|
|
|
|
2013-03-14 03:18:13 +08:00
|
|
|
WARN_ON(!map->bus || !map->format.format_write);
|
2013-01-13 04:54:13 +08:00
|
|
|
|
|
|
|
range = _regmap_range_lookup(map, reg);
|
|
|
|
if (range) {
|
|
|
|
ret = _regmap_select_page(map, ®, range, 1);
|
|
|
|
if (ret != 0)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
map->format.format_write(map, reg, val);
|
|
|
|
|
2015-03-09 19:20:13 +08:00
|
|
|
trace_regmap_hw_write_start(map, reg, 1);
|
2013-01-13 04:54:13 +08:00
|
|
|
|
|
|
|
ret = map->bus->write(map->bus_context, map->work_buf,
|
|
|
|
map->format.buf_size);
|
|
|
|
|
2015-03-09 19:20:13 +08:00
|
|
|
trace_regmap_hw_write_done(map, reg, 1);
|
2013-01-13 04:54:13 +08:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2014-04-17 17:40:11 +08:00
|
|
|
static int _regmap_bus_reg_write(void *context, unsigned int reg,
|
|
|
|
unsigned int val)
|
|
|
|
{
|
|
|
|
struct regmap *map = context;
|
|
|
|
|
|
|
|
return map->bus->reg_write(map->bus_context, reg, val);
|
|
|
|
}
|
|
|
|
|
2013-01-13 04:54:13 +08:00
|
|
|
static int _regmap_bus_raw_write(void *context, unsigned int reg,
|
|
|
|
unsigned int val)
|
|
|
|
{
|
|
|
|
struct regmap *map = context;
|
|
|
|
|
2013-03-14 03:18:13 +08:00
|
|
|
WARN_ON(!map->bus || !map->format.format_val);
|
2013-01-13 04:54:13 +08:00
|
|
|
|
|
|
|
map->format.format_val(map->work_buf + map->format.reg_bytes
|
|
|
|
+ map->format.pad_bytes, val, 0);
|
2018-02-22 20:59:12 +08:00
|
|
|
return _regmap_raw_write_impl(map, reg,
|
|
|
|
map->work_buf +
|
|
|
|
map->format.reg_bytes +
|
|
|
|
map->format.pad_bytes,
|
|
|
|
map->format.val_bytes);
|
2013-01-13 04:54:13 +08:00
|
|
|
}
|
|
|
|
|
2013-01-28 02:49:05 +08:00
|
|
|
static inline void *_regmap_map_get_context(struct regmap *map)
|
|
|
|
{
|
|
|
|
return (map->bus) ? map : map->bus_context;
|
|
|
|
}
|
|
|
|
|
2011-09-29 17:39:07 +08:00
|
|
|
int _regmap_write(struct regmap *map, unsigned int reg,
|
|
|
|
unsigned int val)
|
2011-05-12 01:59:58 +08:00
|
|
|
{
|
2011-07-25 04:30:55 +08:00
|
|
|
int ret;
|
2013-01-28 02:49:05 +08:00
|
|
|
void *context = _regmap_map_get_context(map);
|
2011-05-12 01:59:58 +08:00
|
|
|
|
2013-08-09 18:09:20 +08:00
|
|
|
if (!regmap_writeable(map, reg))
|
|
|
|
return -EIO;
|
|
|
|
|
2013-01-28 02:49:05 +08:00
|
|
|
if (!map->cache_bypass && !map->defer_caching) {
|
2011-09-19 21:34:05 +08:00
|
|
|
ret = regcache_write(map, reg, val);
|
|
|
|
if (ret != 0)
|
|
|
|
return ret;
|
2011-10-26 16:34:22 +08:00
|
|
|
if (map->cache_only) {
|
|
|
|
map->cache_dirty = true;
|
2011-09-19 21:34:05 +08:00
|
|
|
return 0;
|
2011-10-26 16:34:22 +08:00
|
|
|
}
|
2011-09-19 21:34:05 +08:00
|
|
|
}
|
|
|
|
|
2018-10-02 18:42:05 +08:00
|
|
|
if (regmap_should_log(map))
|
2012-07-06 21:10:23 +08:00
|
|
|
dev_info(map->dev, "%x <= %x\n", reg, val);
|
|
|
|
|
2015-03-09 19:20:13 +08:00
|
|
|
trace_regmap_reg_write(map, reg, val);
|
2011-07-25 04:30:55 +08:00
|
|
|
|
2013-01-28 02:49:05 +08:00
|
|
|
return map->reg_write(context, reg, val);
|
2011-05-12 01:59:58 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2017-01-12 19:17:39 +08:00
|
|
|
* regmap_write() - Write a value to a single register
|
2011-05-12 01:59:58 +08:00
|
|
|
*
|
|
|
|
* @map: Register map to write to
|
|
|
|
* @reg: Register to write to
|
|
|
|
* @val: Value to be written
|
|
|
|
*
|
|
|
|
* A value of zero will be returned on success, a negative errno will
|
|
|
|
* be returned in error cases.
|
|
|
|
*/
|
|
|
|
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2015-12-16 17:45:32 +08:00
|
|
|
if (!IS_ALIGNED(reg, map->reg_stride))
|
2012-04-10 03:40:24 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2012-10-16 21:56:59 +08:00
|
|
|
map->lock(map->lock_arg);
|
2011-05-12 01:59:58 +08:00
|
|
|
|
|
|
|
ret = _regmap_write(map, reg, val);
|
|
|
|
|
2012-10-16 21:56:59 +08:00
|
|
|
map->unlock(map->lock_arg);
|
2011-05-12 01:59:58 +08:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(regmap_write);
|
|
|
|
|
2013-10-09 20:30:10 +08:00
|
|
|
/**
|
2017-01-12 19:17:39 +08:00
|
|
|
* regmap_write_async() - Write a value to a single register asynchronously
|
2013-10-09 20:30:10 +08:00
|
|
|
*
|
|
|
|
* @map: Register map to write to
|
|
|
|
* @reg: Register to write to
|
|
|
|
* @val: Value to be written
|
|
|
|
*
|
|
|
|
* A value of zero will be returned on success, a negative errno will
|
|
|
|
* be returned in error cases.
|
|
|
|
*/
|
|
|
|
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2015-12-16 17:45:32 +08:00
|
|
|
if (!IS_ALIGNED(reg, map->reg_stride))
|
2013-10-09 20:30:10 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
map->lock(map->lock_arg);
|
|
|
|
|
|
|
|
map->async = true;
|
|
|
|
|
|
|
|
ret = _regmap_write(map, reg, val);
|
|
|
|
|
|
|
|
map->async = false;
|
|
|
|
|
|
|
|
map->unlock(map->lock_arg);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(regmap_write_async);
|
|
|
|
|
2018-02-22 20:59:12 +08:00
|
|
|
int _regmap_raw_write(struct regmap *map, unsigned int reg,
|
|
|
|
const void *val, size_t val_len)
|
|
|
|
{
|
|
|
|
size_t val_bytes = map->format.val_bytes;
|
|
|
|
size_t val_count = val_len / val_bytes;
|
2018-02-22 20:59:13 +08:00
|
|
|
size_t chunk_count, chunk_bytes;
|
|
|
|
size_t chunk_regs = val_count;
|
2018-02-22 20:59:12 +08:00
|
|
|
int ret, i;
|
|
|
|
|
|
|
|
if (!val_count)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2018-02-22 20:59:13 +08:00
|
|
|
if (map->use_single_write)
|
|
|
|
chunk_regs = 1;
|
|
|
|
else if (map->max_raw_write && val_len > map->max_raw_write)
|
|
|
|
chunk_regs = map->max_raw_write / val_bytes;
|
|
|
|
|
|
|
|
chunk_count = val_count / chunk_regs;
|
|
|
|
chunk_bytes = chunk_regs * val_bytes;
|
2018-02-22 20:59:12 +08:00
|
|
|
|
|
|
|
/* Write as many bytes as possible with chunk_size */
|
|
|
|
for (i = 0; i < chunk_count; i++) {
|
2018-02-22 20:59:13 +08:00
|
|
|
ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
|
2018-02-22 20:59:12 +08:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
2018-02-22 20:59:13 +08:00
|
|
|
|
|
|
|
reg += regmap_get_offset(map, chunk_regs);
|
|
|
|
val += chunk_bytes;
|
|
|
|
val_len -= chunk_bytes;
|
2018-02-22 20:59:12 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Write remaining bytes */
|
2018-02-22 20:59:13 +08:00
|
|
|
if (val_len)
|
|
|
|
ret = _regmap_raw_write_impl(map, reg, val, val_len);
|
2018-02-22 20:59:12 +08:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-05-12 01:59:58 +08:00
|
|
|
/**
|
2017-01-12 19:17:39 +08:00
|
|
|
* regmap_raw_write() - Write raw values to one or more registers
|
2011-05-12 01:59:58 +08:00
|
|
|
*
|
|
|
|
* @map: Register map to write to
|
|
|
|
* @reg: Initial register to write to
|
|
|
|
* @val: Block of data to be written, laid out for direct transmission to the
|
|
|
|
* device
|
|
|
|
* @val_len: Length of data pointed to by val.
|
|
|
|
*
|
|
|
|
* This function is intended to be used for things like firmware
|
|
|
|
* download where a large block of data needs to be transferred to the
|
|
|
|
* device. No formatting will be done on the data provided.
|
|
|
|
*
|
|
|
|
* A value of zero will be returned on success, a negative errno will
|
|
|
|
* be returned in error cases.
|
|
|
|
*/
|
|
|
|
int regmap_raw_write(struct regmap *map, unsigned int reg,
|
|
|
|
const void *val, size_t val_len)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2013-03-27 05:24:20 +08:00
|
|
|
if (!regmap_can_raw_write(map))
|
2013-01-28 02:49:05 +08:00
|
|
|
return -EINVAL;
|
2012-04-07 05:16:03 +08:00
|
|
|
if (val_len % map->format.val_bytes)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2012-10-16 21:56:59 +08:00
|
|
|
map->lock(map->lock_arg);
|
2011-05-12 01:59:58 +08:00
|
|
|
|
2013-10-09 19:28:52 +08:00
|
|
|
ret = _regmap_raw_write(map, reg, val, val_len);
|
2011-05-12 01:59:58 +08:00
|
|
|
|
2012-10-16 21:56:59 +08:00
|
|
|
map->unlock(map->lock_arg);
|
2011-05-12 01:59:58 +08:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(regmap_raw_write);
|
|
|
|
|
2018-10-19 17:33:50 +08:00
|
|
|
/**
|
|
|
|
* regmap_noinc_write(): Write data from a register without incrementing the
|
|
|
|
* register number
|
|
|
|
*
|
|
|
|
* @map: Register map to write to
|
|
|
|
* @reg: Register to write to
|
|
|
|
* @val: Pointer to data buffer
|
|
|
|
* @val_len: Length of output buffer in bytes.
|
|
|
|
*
|
|
|
|
* The regmap API usually assumes that bulk bus write operations will write a
|
|
|
|
* range of registers. Some devices have certain registers for which a write
|
|
|
|
* operation can write to an internal FIFO.
|
|
|
|
*
|
|
|
|
* The target register must be volatile but registers after it can be
|
|
|
|
* completely unrelated cacheable registers.
|
|
|
|
*
|
|
|
|
* This will attempt multiple writes as required to write val_len bytes.
|
|
|
|
*
|
|
|
|
* A value of zero will be returned on success, a negative errno will be
|
|
|
|
* returned in error cases.
|
|
|
|
*/
|
|
|
|
int regmap_noinc_write(struct regmap *map, unsigned int reg,
|
|
|
|
const void *val, size_t val_len)
|
|
|
|
{
|
|
|
|
size_t write_len;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!map->bus)
|
|
|
|
return -EINVAL;
|
|
|
|
if (!map->bus->write)
|
|
|
|
return -ENOTSUPP;
|
|
|
|
if (val_len % map->format.val_bytes)
|
|
|
|
return -EINVAL;
|
|
|
|
if (!IS_ALIGNED(reg, map->reg_stride))
|
|
|
|
return -EINVAL;
|
|
|
|
if (val_len == 0)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
map->lock(map->lock_arg);
|
|
|
|
|
|
|
|
if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
|
|
|
while (val_len) {
|
|
|
|
if (map->max_raw_write && map->max_raw_write < val_len)
|
|
|
|
write_len = map->max_raw_write;
|
|
|
|
else
|
|
|
|
write_len = val_len;
|
|
|
|
ret = _regmap_raw_write(map, reg, val, write_len);
|
|
|
|
if (ret)
|
|
|
|
goto out_unlock;
|
|
|
|
val = ((u8 *)val) + write_len;
|
|
|
|
val_len -= write_len;
|
|
|
|
}
|
|
|
|
|
|
|
|
out_unlock:
|
|
|
|
map->unlock(map->lock_arg);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(regmap_noinc_write);
|
|
|
|
|
2013-06-11 20:18:15 +08:00
|
|
|
/**
|
2017-01-12 19:17:39 +08:00
|
|
|
* regmap_field_update_bits_base() - Perform a read/modify/write cycle a
|
|
|
|
* register field.
|
2013-09-02 11:24:50 +08:00
|
|
|
*
|
|
|
|
* @field: Register field to write to
|
|
|
|
* @mask: Bitmask to change
|
|
|
|
* @val: Value to be written
|
2016-02-15 13:23:55 +08:00
|
|
|
* @change: Boolean indicating if a write was done
|
|
|
|
* @async: Boolean indicating asynchronously
|
|
|
|
* @force: Boolean indicating use force update
|
2013-09-02 11:24:50 +08:00
|
|
|
*
|
2017-01-12 19:17:39 +08:00
|
|
|
* Perform a read/modify/write cycle on the register field with change,
|
|
|
|
* async, force option.
|
|
|
|
*
|
2013-09-02 11:24:50 +08:00
|
|
|
* A value of zero will be returned on success, a negative errno will
|
|
|
|
* be returned in error cases.
|
|
|
|
*/
|
2016-02-15 13:23:55 +08:00
|
|
|
int regmap_field_update_bits_base(struct regmap_field *field,
|
|
|
|
unsigned int mask, unsigned int val,
|
|
|
|
bool *change, bool async, bool force)
|
2013-09-02 11:24:50 +08:00
|
|
|
{
|
|
|
|
mask = (mask << field->shift) & field->mask;
|
|
|
|
|
2016-02-15 13:23:55 +08:00
|
|
|
return regmap_update_bits_base(field->regmap, field->reg,
|
|
|
|
mask, val << field->shift,
|
|
|
|
change, async, force);
|
2015-06-16 16:52:55 +08:00
|
|
|
}
|
2016-02-15 13:23:55 +08:00
|
|
|
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
|
2015-06-16 16:52:55 +08:00
|
|
|
|
2013-09-02 11:30:50 +08:00
|
|
|
/**
|
2017-01-12 19:17:39 +08:00
|
|
|
* regmap_fields_update_bits_base() - Perform a read/modify/write cycle a
|
|
|
|
* register field with port ID
|
2013-09-02 11:30:50 +08:00
|
|
|
*
|
|
|
|
* @field: Register field to write to
|
|
|
|
* @id: port ID
|
|
|
|
* @mask: Bitmask to change
|
|
|
|
* @val: Value to be written
|
2016-02-15 13:24:51 +08:00
|
|
|
* @change: Boolean indicating if a write was done
|
|
|
|
* @async: Boolean indicating asynchronously
|
|
|
|
* @force: Boolean indicating use force update
|
2013-09-02 11:30:50 +08:00
|
|
|
*
|
|
|
|
* A value of zero will be returned on success, a negative errno will
|
|
|
|
* be returned in error cases.
|
|
|
|
*/
|
2020-06-15 15:25:07 +08:00
|
|
|
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
|
2016-02-15 13:24:51 +08:00
|
|
|
unsigned int mask, unsigned int val,
|
|
|
|
bool *change, bool async, bool force)
|
2013-09-02 11:30:50 +08:00
|
|
|
{
|
|
|
|
if (id >= field->id_size)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
mask = (mask << field->shift) & field->mask;
|
|
|
|
|
2016-02-15 13:24:51 +08:00
|
|
|
return regmap_update_bits_base(field->regmap,
|
|
|
|
field->reg + (field->id_offset * id),
|
|
|
|
mask, val << field->shift,
|
|
|
|
change, async, force);
|
2013-09-02 11:30:50 +08:00
|
|
|
}
|
2016-02-15 13:24:51 +08:00
|
|
|
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
|
2013-09-02 11:30:50 +08:00
|
|
|
|
2017-01-12 19:17:39 +08:00
|
|
|
/**
|
|
|
|
* regmap_bulk_write() - Write multiple registers to the device
|
2012-02-12 22:19:43 +08:00
|
|
|
*
|
|
|
|
* @map: Register map to write to
|
|
|
|
* @reg: First register to be write from
|
|
|
|
* @val: Block of data to be written, in native register size for device
|
|
|
|
* @val_count: Number of registers to write
|
|
|
|
*
|
|
|
|
* This function is intended to be used for writing a large block of
|
2013-01-18 23:51:03 +08:00
|
|
|
* data to the device either in single transfer or multiple transfer.
|
2012-02-12 22:19:43 +08:00
|
|
|
*
|
|
|
|
* A value of zero will be returned on success, a negative errno will
|
|
|
|
* be returned in error cases.
|
|
|
|
*/
|
|
|
|
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
|
|
|
|
size_t val_count)
|
|
|
|
{
|
|
|
|
int ret = 0, i;
|
|
|
|
size_t val_bytes = map->format.val_bytes;
|
|
|
|
|
2015-12-16 17:45:32 +08:00
|
|
|
if (!IS_ALIGNED(reg, map->reg_stride))
|
2012-04-10 03:40:24 +08:00
|
|
|
return -EINVAL;
|
2012-02-12 22:19:43 +08:00
|
|
|
|
2013-12-27 05:52:04 +08:00
|
|
|
/*
|
2018-02-22 20:59:14 +08:00
|
|
|
* Some devices don't support bulk write, for them we have a series of
|
|
|
|
* single write operations.
|
2013-12-27 05:52:04 +08:00
|
|
|
*/
|
2018-02-22 20:59:14 +08:00
|
|
|
if (!map->bus || !map->format.parse_inplace) {
|
2014-03-18 19:58:33 +08:00
|
|
|
map->lock(map->lock_arg);
|
2013-12-27 05:52:04 +08:00
|
|
|
for (i = 0; i < val_count; i++) {
|
|
|
|
unsigned int ival;
|
|
|
|
|
|
|
|
switch (val_bytes) {
|
|
|
|
case 1:
|
|
|
|
ival = *(u8 *)(val + (i * val_bytes));
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
ival = *(u16 *)(val + (i * val_bytes));
|
|
|
|
break;
|
|
|
|
case 4:
|
|
|
|
ival = *(u32 *)(val + (i * val_bytes));
|
|
|
|
break;
|
|
|
|
#ifdef CONFIG_64BIT
|
|
|
|
case 8:
|
|
|
|
ival = *(u64 *)(val + (i * val_bytes));
|
|
|
|
break;
|
|
|
|
#endif
|
|
|
|
default:
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
2012-02-12 22:19:43 +08:00
|
|
|
|
2016-01-04 18:00:33 +08:00
|
|
|
ret = _regmap_write(map,
|
|
|
|
reg + regmap_get_offset(map, i),
|
|
|
|
ival);
|
2013-12-27 05:52:04 +08:00
|
|
|
if (ret != 0)
|
|
|
|
goto out;
|
|
|
|
}
|
2014-03-18 19:58:33 +08:00
|
|
|
out:
|
|
|
|
map->unlock(map->lock_arg);
|
2012-02-12 22:19:43 +08:00
|
|
|
} else {
|
2013-12-27 05:52:04 +08:00
|
|
|
void *wval;
|
|
|
|
|
2015-09-12 07:37:05 +08:00
|
|
|
wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
|
2018-02-22 20:59:11 +08:00
|
|
|
if (!wval)
|
2014-03-18 19:58:33 +08:00
|
|
|
return -ENOMEM;
|
2018-02-22 20:59:11 +08:00
|
|
|
|
2012-02-12 22:19:43 +08:00
|
|
|
for (i = 0; i < val_count * val_bytes; i += val_bytes)
|
2013-03-04 09:04:51 +08:00
|
|
|
map->format.parse_inplace(wval + i);
|
2013-12-27 05:52:04 +08:00
|
|
|
|
2018-02-22 20:59:12 +08:00
|
|
|
ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);
|
2012-02-12 22:19:43 +08:00
|
|
|
|
|
|
|
kfree(wval);
|
2013-12-27 05:52:04 +08:00
|
|
|
}
|
2012-02-12 22:19:43 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(regmap_bulk_write);
|
|
|
|
|
2014-03-04 21:54:02 +08:00
|
|
|
/*
|
|
|
|
* _regmap_raw_multi_reg_write()
|
|
|
|
*
|
|
|
|
* the (register,newvalue) pairs in regs have not been formatted, but
|
|
|
|
* they are all in the same page and have been changed to being page
|
2015-08-12 15:02:19 +08:00
|
|
|
* relative. The page register has been written if that was necessary.
|
2014-03-04 21:54:02 +08:00
|
|
|
*/
|
|
|
|
static int _regmap_raw_multi_reg_write(struct regmap *map,
|
2015-07-16 23:36:21 +08:00
|
|
|
const struct reg_sequence *regs,
|
2014-03-04 21:54:02 +08:00
|
|
|
size_t num_regs)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
void *buf;
|
|
|
|
int i;
|
|
|
|
u8 *u8;
|
|
|
|
size_t val_bytes = map->format.val_bytes;
|
|
|
|
size_t reg_bytes = map->format.reg_bytes;
|
|
|
|
size_t pad_bytes = map->format.pad_bytes;
|
|
|
|
size_t pair_size = reg_bytes + pad_bytes + val_bytes;
|
|
|
|
size_t len = pair_size * num_regs;
|
|
|
|
|
2014-04-30 17:31:08 +08:00
|
|
|
if (!len)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2014-03-04 21:54:02 +08:00
|
|
|
buf = kzalloc(len, GFP_KERNEL);
|
|
|
|
if (!buf)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
/* We have to linearise by hand. */
|
|
|
|
|
|
|
|
u8 = buf;
|
|
|
|
|
|
|
|
for (i = 0; i < num_regs; i++) {
|
2015-08-12 18:12:28 +08:00
|
|
|
unsigned int reg = regs[i].reg;
|
|
|
|
unsigned int val = regs[i].def;
|
2015-03-09 19:20:13 +08:00
|
|
|
trace_regmap_hw_write_start(map, reg, 1);
|
2014-03-04 21:54:02 +08:00
|
|
|
map->format.format_reg(u8, reg, map->reg_shift);
|
|
|
|
u8 += reg_bytes + pad_bytes;
|
|
|
|
map->format.format_val(u8, val, 0);
|
|
|
|
u8 += val_bytes;
|
|
|
|
}
|
|
|
|
u8 = buf;
|
|
|
|
*u8 |= map->write_flag_mask;
|
|
|
|
|
|
|
|
ret = map->bus->write(map->bus_context, buf, len);
|
|
|
|
|
|
|
|
kfree(buf);
|
|
|
|
|
|
|
|
for (i = 0; i < num_regs; i++) {
|
|
|
|
int reg = regs[i].reg;
|
2015-03-09 19:20:13 +08:00
|
|
|
trace_regmap_hw_write_done(map, reg, 1);
|
2014-03-04 21:54:02 +08:00
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned int _regmap_register_page(struct regmap *map,
|
|
|
|
unsigned int reg,
|
|
|
|
struct regmap_range_node *range)
|
|
|
|
{
|
|
|
|
unsigned int win_page = (reg - range->range_min) / range->window_len;
|
|
|
|
|
|
|
|
return win_page;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Write a reg_sequence that may span paged register windows and/or
 * contain per-entry delays.  The sequence is written in order, split
 * into raw multi-register bursts at every page boundary or delay.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;	/* i: index into regs, n: length of the pending burst */
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * the set of registers are not necessarily in order, but
	 * since the order of write must be preserved this algorithm
	 * chops the set each time the page changes. This also applies
	 * if there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n=0
			 * This can't occur with page breaks as we
			 * never write on the first iteration
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			/* Honour the per-entry delay before continuing */
			if (regs[i].delay_us) {
				if (map->can_sleep)
					fsleep(regs[i].delay_us);
				else
					udelay(regs[i].delay_us);
			}

			/* Start the next burst just past what was written */
			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}

		}

	}
	/* Flush any trailing burst that was not terminated by a split */
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}
|
|
|
|
|
2014-02-25 21:45:50 +08:00
|
|
|
/*
 * Core implementation of multi-register writes, called with the map
 * lock held.  Falls back to one _regmap_write() per entry when the map
 * cannot do multi writes; otherwise validates, updates the cache and
 * dispatches to the paged or raw multi-register write paths.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	/* Simple path: write each register individually */
	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			/* Honour any per-entry delay */
			if (regs[i].delay_us) {
				if (map->can_sleep)
					fsleep(regs[i].delay_us);
				else
					udelay(regs[i].delay_us);
			}
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	/* Validate writeability and alignment up front, before any I/O */
	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	/* Populate the cache first; in cache-only mode this is all we do */
	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
				"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence)*num_regs;
			/* Duplicate: the paged path may rewrite reg fields */
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	/* No paging and no delays: a single raw burst suffices */
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}
|
|
|
|
|
2017-01-12 19:17:39 +08:00
|
|
|
/**
|
|
|
|
* regmap_multi_reg_write() - Write multiple registers to the device
|
2013-10-11 22:31:11 +08:00
|
|
|
*
|
|
|
|
* @map: Register map to write to
|
|
|
|
* @regs: Array of structures containing register,value to be written
|
|
|
|
* @num_regs: Number of registers to write
|
|
|
|
*
|
2017-01-12 19:17:39 +08:00
|
|
|
* Write multiple registers to the device where the set of register, value
|
|
|
|
* pairs are supplied in any order, possibly not all in a single range.
|
|
|
|
*
|
2014-03-04 21:54:02 +08:00
|
|
|
 * The 'normal' block write mode will ultimately send data on the
|
2017-01-12 19:17:39 +08:00
|
|
|
* target bus as R,V1,V2,V3,..,Vn where successively higher registers are
|
2014-03-04 21:54:02 +08:00
|
|
|
* addressed. However, this alternative block multi write mode will send
|
|
|
|
* the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
|
|
|
|
* must of course support the mode.
|
2013-10-11 22:31:11 +08:00
|
|
|
*
|
2014-03-04 21:54:02 +08:00
|
|
|
* A value of zero will be returned on success, a negative errno will be
|
|
|
|
* returned in error cases.
|
2013-10-11 22:31:11 +08:00
|
|
|
*/
|
2015-07-16 23:36:21 +08:00
|
|
|
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
|
2014-02-25 21:45:49 +08:00
|
|
|
int num_regs)
|
2013-10-11 22:31:11 +08:00
|
|
|
{
|
2014-02-25 21:45:50 +08:00
|
|
|
int ret;
|
2013-10-11 22:31:11 +08:00
|
|
|
|
|
|
|
map->lock(map->lock_arg);
|
|
|
|
|
2014-02-25 21:45:50 +08:00
|
|
|
ret = _regmap_multi_reg_write(map, regs, num_regs);
|
|
|
|
|
2013-10-11 22:31:11 +08:00
|
|
|
map->unlock(map->lock_arg);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
|
|
|
|
|
2017-01-12 19:17:39 +08:00
|
|
|
/**
|
|
|
|
* regmap_multi_reg_write_bypassed() - Write multiple registers to the
|
|
|
|
* device but not the cache
|
2013-10-11 22:31:11 +08:00
|
|
|
*
|
|
|
|
* @map: Register map to write to
|
|
|
|
* @regs: Array of structures containing register,value to be written
|
|
|
|
* @num_regs: Number of registers to write
|
|
|
|
*
|
2017-01-12 19:17:39 +08:00
|
|
|
* Write multiple registers to the device but not the cache where the set
|
|
|
|
* of register are supplied in any order.
|
|
|
|
*
|
2013-10-11 22:31:11 +08:00
|
|
|
* This function is intended to be used for writing a large block of data
|
|
|
|
* atomically to the device in single transfer for those I2C client devices
|
|
|
|
* that implement this alternative block write mode.
|
|
|
|
*
|
|
|
|
* A value of zero will be returned on success, a negative errno will
|
|
|
|
* be returned in error cases.
|
|
|
|
*/
|
2014-02-25 21:45:50 +08:00
|
|
|
int regmap_multi_reg_write_bypassed(struct regmap *map,
|
2015-07-16 23:36:21 +08:00
|
|
|
const struct reg_sequence *regs,
|
2014-02-25 21:45:50 +08:00
|
|
|
int num_regs)
|
2013-10-11 22:31:11 +08:00
|
|
|
{
|
2014-02-25 21:45:50 +08:00
|
|
|
int ret;
|
|
|
|
bool bypass;
|
2013-10-11 22:31:11 +08:00
|
|
|
|
|
|
|
map->lock(map->lock_arg);
|
|
|
|
|
2014-02-25 21:45:50 +08:00
|
|
|
bypass = map->cache_bypass;
|
|
|
|
map->cache_bypass = true;
|
|
|
|
|
|
|
|
ret = _regmap_multi_reg_write(map, regs, num_regs);
|
|
|
|
|
|
|
|
map->cache_bypass = bypass;
|
|
|
|
|
2013-10-11 22:31:11 +08:00
|
|
|
map->unlock(map->lock_arg);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
2014-02-25 21:45:50 +08:00
|
|
|
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
|
2013-10-11 22:31:11 +08:00
|
|
|
|
2013-01-27 22:07:38 +08:00
|
|
|
/**
|
2017-01-12 19:17:39 +08:00
|
|
|
* regmap_raw_write_async() - Write raw values to one or more registers
|
|
|
|
* asynchronously
|
2013-01-27 22:07:38 +08:00
|
|
|
*
|
|
|
|
* @map: Register map to write to
|
|
|
|
* @reg: Initial register to write to
|
|
|
|
* @val: Block of data to be written, laid out for direct transmission to the
|
|
|
|
* device. Must be valid until regmap_async_complete() is called.
|
|
|
|
* @val_len: Length of data pointed to by val.
|
|
|
|
*
|
|
|
|
* This function is intended to be used for things like firmware
|
|
|
|
* download where a large block of data needs to be transferred to the
|
|
|
|
* device. No formatting will be done on the data provided.
|
|
|
|
*
|
|
|
|
* If supported by the underlying bus the write will be scheduled
|
|
|
|
* asynchronously, helping maximise I/O speed on higher speed buses
|
|
|
|
* like SPI. regmap_async_complete() can be called to ensure that all
|
|
|
|
 * asynchronous writes have been completed.
|
|
|
|
*
|
|
|
|
* A value of zero will be returned on success, a negative errno will
|
|
|
|
* be returned in error cases.
|
|
|
|
*/
|
|
|
|
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int err;

	/* Reject partial values and misaligned start registers */
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	/* Flag the write as asynchronous for the duration of the transfer */
	map->async = true;
	err = _regmap_raw_write(map, reg, val, val_len);
	map->async = false;

	map->unlock(map->lock_arg);

	return err;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);
|
|
|
|
|
2011-05-12 01:59:58 +08:00
|
|
|
/*
 * Perform a raw read of @val_len bytes starting at @reg, handling page
 * selection and formatting the register address into map->work_buf
 * before issuing the bus read.  Called with the map lock held.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	if (!map->bus || !map->bus->read)
		return -EINVAL;

	/* Select the correct window page if the register is in a range */
	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	/* Format the (possibly remapped) register address into work_buf */
	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}
|
|
|
|
|
2014-04-17 17:40:11 +08:00
|
|
|
static int _regmap_bus_reg_read(void *context, unsigned int reg,
|
|
|
|
unsigned int *val)
|
|
|
|
{
|
|
|
|
struct regmap *map = context;
|
|
|
|
|
|
|
|
return map->bus->reg_read(map->bus_context, reg, val);
|
|
|
|
}
|
|
|
|
|
2013-01-13 04:54:12 +08:00
|
|
|
static int _regmap_bus_read(void *context, unsigned int reg,
|
|
|
|
unsigned int *val)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct regmap *map = context;
|
2017-11-30 22:09:15 +08:00
|
|
|
void *work_val = map->work_buf + map->format.reg_bytes +
|
|
|
|
map->format.pad_bytes;
|
2013-01-13 04:54:12 +08:00
|
|
|
|
|
|
|
if (!map->format.parse_val)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2017-11-30 22:09:15 +08:00
|
|
|
ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
|
2013-01-13 04:54:12 +08:00
|
|
|
if (ret == 0)
|
2017-11-30 22:09:15 +08:00
|
|
|
*val = map->format.parse_val(work_val);
|
2013-01-13 04:54:12 +08:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-05-12 01:59:58 +08:00
|
|
|
/*
 * Read a single register value, consulting the cache first where
 * enabled and populating it on a successful hardware read.  Called
 * with the map lock held.
 *
 * Returns 0 on success, -EBUSY in cache-only mode on a cache miss,
 * -EIO for unreadable registers, or another negative errno.
 */
static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	/* Cache hit: no hardware access needed */
	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
		if (regmap_should_log(map))
			dev_info(map->dev, "%x => %x\n", reg, *val);

		trace_regmap_reg_read(map, reg, *val);

		/* Keep the cache in sync with what we just read */
		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}
|
|
|
|
|
|
|
|
/**
|
2017-01-12 19:17:39 +08:00
|
|
|
* regmap_read() - Read a value from a single register
|
2011-05-12 01:59:58 +08:00
|
|
|
*
|
2013-11-11 17:42:36 +08:00
|
|
|
* @map: Register map to read from
|
2011-05-12 01:59:58 +08:00
|
|
|
* @reg: Register to be read from
|
|
|
|
* @val: Pointer to store read value
|
|
|
|
*
|
|
|
|
* A value of zero will be returned on success, a negative errno will
|
|
|
|
* be returned in error cases.
|
|
|
|
*/
|
|
|
|
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2015-12-16 17:45:32 +08:00
|
|
|
if (!IS_ALIGNED(reg, map->reg_stride))
|
2012-04-10 03:40:24 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2012-10-16 21:56:59 +08:00
|
|
|
map->lock(map->lock_arg);
|
2011-05-12 01:59:58 +08:00
|
|
|
|
|
|
|
ret = _regmap_read(map, reg, val);
|
|
|
|
|
2012-10-16 21:56:59 +08:00
|
|
|
map->unlock(map->lock_arg);
|
2011-05-12 01:59:58 +08:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(regmap_read);
|
|
|
|
|
|
|
|
/**
|
2017-01-12 19:17:39 +08:00
|
|
|
* regmap_raw_read() - Read raw data from the device
|
2011-05-12 01:59:58 +08:00
|
|
|
*
|
2013-11-11 17:42:36 +08:00
|
|
|
* @map: Register map to read from
|
2011-05-12 01:59:58 +08:00
|
|
|
* @reg: First register to be read from
|
|
|
|
* @val: Pointer to store read value
|
|
|
|
* @val_len: Size of data to read
|
|
|
|
*
|
|
|
|
* A value of zero will be returned on success, a negative errno will
|
|
|
|
* be returned in error cases.
|
|
|
|
*/
|
|
|
|
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (!map->bus)
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	/*
	 * Go to the hardware when the range is volatile or uncached,
	 * splitting into chunks to respect single-read and max-raw-read
	 * limits; otherwise read word by word through the cache.
	 */
	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		size_t chunk_count, chunk_bytes;
		size_t chunk_regs = val_count;

		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}

		/* Pick the largest chunk size the map allows */
		if (map->use_single_read)
			chunk_regs = 1;
		else if (map->max_raw_read && val_len > map->max_raw_read)
			chunk_regs = map->max_raw_read / val_bytes;

		chunk_count = val_count / chunk_regs;
		chunk_bytes = chunk_regs * val_bytes;

		/* Read bytes that fit into whole chunks */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_read(map, reg, val, chunk_bytes);
			if (ret != 0)
				goto out;

			reg += regmap_get_offset(map, chunk_regs);
			val += chunk_bytes;
			val_len -= chunk_bytes;
		}

		/* Read remaining bytes */
		if (val_len) {
			ret = _regmap_raw_read(map, reg, val, val_len);
			if (ret != 0)
				goto out;
		}
	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			/* Re-format the cached value into raw wire format */
			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

 out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);
|
|
|
|
|
2013-06-11 20:18:15 +08:00
|
|
|
/**
|
2018-08-07 22:52:17 +08:00
|
|
|
 * regmap_noinc_read() - Read data from a register without incrementing the
|
|
|
|
* register number
|
|
|
|
*
|
|
|
|
* @map: Register map to read from
|
|
|
|
* @reg: Register to read from
|
|
|
|
* @val: Pointer to data buffer
|
|
|
|
* @val_len: Length of output buffer in bytes.
|
|
|
|
*
|
|
|
|
* The regmap API usually assumes that bulk bus read operations will read a
|
|
|
|
* range of registers. Some devices have certain registers for which a read
|
|
|
|
 * operation will instead read from an internal FIFO.
|
|
|
|
*
|
|
|
|
* The target register must be volatile but registers after it can be
|
|
|
|
* completely unrelated cacheable registers.
|
|
|
|
*
|
|
|
|
* This will attempt multiple reads as required to read val_len bytes.
|
|
|
|
*
|
|
|
|
* A value of zero will be returned on success, a negative errno will be
|
|
|
|
* returned in error cases.
|
|
|
|
*/
|
|
|
|
int regmap_noinc_read(struct regmap *map, unsigned int reg,
		      void *val, size_t val_len)
{
	size_t read_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->read)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	/* Only volatile registers marked readable_noinc may be read this way */
	if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Repeatedly read from the same register, bounded per transfer by
	 * max_raw_read, until the caller's buffer has been filled.
	 */
	while (val_len) {
		if (map->max_raw_read && map->max_raw_read < val_len)
			read_len = map->max_raw_read;
		else
			read_len = val_len;
		ret = _regmap_raw_read(map, reg, val, read_len);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + read_len;
		val_len -= read_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_read);
|
|
|
|
|
|
|
|
/**
|
|
|
|
 * regmap_field_read() - Read a value from a single register field
|
2013-06-11 20:18:15 +08:00
|
|
|
*
|
|
|
|
* @field: Register field to read from
|
|
|
|
* @val: Pointer to store read value
|
|
|
|
*
|
|
|
|
* A value of zero will be returned on success, a negative errno will
|
|
|
|
* be returned in error cases.
|
|
|
|
*/
|
|
|
|
int regmap_field_read(struct regmap_field *field, unsigned int *val)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
unsigned int reg_val;
|
|
|
|
ret = regmap_read(field->regmap, field->reg, ®_val);
|
|
|
|
if (ret != 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
reg_val &= field->mask;
|
|
|
|
reg_val >>= field->shift;
|
|
|
|
*val = reg_val;
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(regmap_field_read);
|
|
|
|
|
2013-09-02 11:30:50 +08:00
|
|
|
/**
|
2017-01-12 19:17:39 +08:00
|
|
|
* regmap_fields_read() - Read a value to a single register field with port ID
|
2013-09-02 11:30:50 +08:00
|
|
|
*
|
|
|
|
* @field: Register field to read from
|
|
|
|
* @id: port ID
|
|
|
|
* @val: Pointer to store read value
|
|
|
|
*
|
|
|
|
* A value of zero will be returned on success, a negative errno will
|
|
|
|
* be returned in error cases.
|
|
|
|
*/
|
|
|
|
int regmap_fields_read(struct regmap_field *field, unsigned int id,
|
|
|
|
unsigned int *val)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
unsigned int reg_val;
|
|
|
|
|
|
|
|
if (id >= field->id_size)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
ret = regmap_read(field->regmap,
|
|
|
|
field->reg + (field->id_offset * id),
|
|
|
|
®_val);
|
|
|
|
if (ret != 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
reg_val &= field->mask;
|
|
|
|
reg_val >>= field->shift;
|
|
|
|
*val = reg_val;
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(regmap_fields_read);
|
|
|
|
|
2011-05-12 01:59:58 +08:00
|
|
|
/**
|
2017-01-12 19:17:39 +08:00
|
|
|
* regmap_bulk_read() - Read multiple registers from the device
|
2011-05-12 01:59:58 +08:00
|
|
|
*
|
2013-11-11 17:42:36 +08:00
|
|
|
* @map: Register map to read from
|
2011-05-12 01:59:58 +08:00
|
|
|
* @reg: First register to be read from
|
|
|
|
* @val: Pointer to store read value, in native register size for device
|
|
|
|
* @val_count: Number of registers to read
|
|
|
|
*
|
|
|
|
* A value of zero will be returned on success, a negative errno will
|
|
|
|
* be returned in error cases.
|
|
|
|
*/
|
|
|
|
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	/*
	 * Fast path: one raw read then an in-place parse of each value.
	 * Only usable when the range bypasses the cache entirely.
	 */
	if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
		if (ret != 0)
			return ret;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
		/* Slow path: read register by register, storing each value
		 * into the caller's buffer at the map's native width.
		 */
#ifdef CONFIG_64BIT
		u64 *u64 = val;
#endif
		u32 *u32 = val;
		u16 *u16 = val;
		u8 *u8 = val;

		map->lock(map->lock_arg);

		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &ival);
			if (ret != 0)
				goto out;

			switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
			case 8:
				u64[i] = ival;
				break;
#endif
			case 4:
				u32[i] = ival;
				break;
			case 2:
				u16[i] = ival;
				break;
			case 1:
				u8[i] = ival;
				break;
			default:
				ret = -EINVAL;
				goto out;
			}
		}

out:
		map->unlock(map->lock_arg);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
|
|
|
|
|
2011-11-30 04:10:36 +08:00
|
|
|
/*
 * Read/modify/write helper, called with the map lock held.  Uses the
 * bus's native reg_update_bits operation for volatile registers when
 * available; otherwise reads, applies the mask, and writes back only
 * if the value changed (or @force_write is set).
 *
 * If @change is non-NULL it is set to true when a write was issued.
 * Returns 0 on success or a negative errno on failure.
 */
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		/* Merge the masked bits of val into the current value */
		tmp = orig & ~mask;
		tmp |= val & mask;

		/* Skip the write when nothing changed, unless forced */
		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}
|
2011-11-30 04:10:36 +08:00
|
|
|
|
|
|
|
/**
|
2017-01-12 19:17:39 +08:00
|
|
|
* regmap_update_bits_base() - Perform a read/modify/write cycle on a register
|
2013-10-09 20:30:10 +08:00
|
|
|
*
|
|
|
|
* @map: Register map to update
|
|
|
|
* @reg: Register to update
|
|
|
|
* @mask: Bitmask to change
|
|
|
|
* @val: New value for bitmask
|
|
|
|
* @change: Boolean indicating if a write was done
|
2016-02-15 13:22:18 +08:00
|
|
|
* @async: Boolean indicating asynchronously
|
|
|
|
* @force: Boolean indicating use force update
|
2013-10-09 20:30:10 +08:00
|
|
|
*
|
2017-01-12 19:17:39 +08:00
|
|
|
* Perform a read/modify/write cycle on a register map with change, async, force
|
|
|
|
* options.
|
|
|
|
*
|
|
|
|
* If async is true:
|
|
|
|
*
|
|
|
|
* With most buses the read must be done synchronously so this is most useful
|
|
|
|
* for devices with a cache which do not need to interact with the hardware to
|
|
|
|
* determine the current register value.
|
2013-10-09 20:30:10 +08:00
|
|
|
*
|
|
|
|
* Returns zero for success, a negative number on error.
|
|
|
|
*/
|
2016-02-15 13:22:18 +08:00
|
|
|
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int err;

	map->lock(map->lock_arg);

	/* Mark the map async for the duration of the update if requested */
	map->async = async;
	err = _regmap_update_bits(map, reg, mask, val, change, force);
	map->async = false;

	map->unlock(map->lock_arg);

	return err;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);
|
2013-10-09 20:30:10 +08:00
|
|
|
|
2020-05-28 23:45:02 +08:00
|
|
|
/**
|
|
|
|
* regmap_test_bits() - Check if all specified bits are set in a register.
|
|
|
|
*
|
|
|
|
* @map: Register map to operate on
|
|
|
|
* @reg: Register to read from
|
|
|
|
* @bits: Bits to test
|
|
|
|
*
|
2020-06-07 17:34:21 +08:00
|
|
|
* Returns 0 if at least one of the tested bits is not set, 1 if all tested
|
|
|
|
* bits are set and a negative error number if the underlying regmap_read()
|
|
|
|
* fails.
|
2020-05-28 23:45:02 +08:00
|
|
|
*/
|
|
|
|
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
{
	unsigned int val;
	/*
	 * ret must be signed: regmap_read() returns a negative errno on
	 * failure, which an unsigned variable would silently corrupt.
	 */
	int ret;

	ret = regmap_read(map, reg, &val);
	if (ret)
		return ret;

	/* 1 only when every requested bit is set in the register */
	return (val & bits) == bits;
}
EXPORT_SYMBOL_GPL(regmap_test_bits);
|
|
|
|
|
2013-01-27 22:07:38 +08:00
|
|
|
/*
 * Completion callback invoked by bus drivers when an asynchronous
 * transfer finishes.  Moves the async descriptor onto the free list,
 * records the first error seen, and wakes any waiter in
 * regmap_async_complete() once the pending list drains.
 */
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	/*
	 * NOTE(review): plain spin_lock here while the waiter side uses
	 * spin_lock_irqsave - presumably this callback never runs in hard
	 * IRQ context; confirm against the bus drivers that call it.
	 */
	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	/* Keep the first/latest error for regmap_async_complete() to return */
	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
|
2013-01-27 22:07:38 +08:00
|
|
|
|
|
|
|
static int regmap_async_is_done(struct regmap *map)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&map->async_lock, flags);
|
|
|
|
ret = list_empty(&map->async_list);
|
|
|
|
spin_unlock_irqrestore(&map->async_lock, flags);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2017-01-12 19:17:39 +08:00
|
|
|
* regmap_async_complete - Ensure all asynchronous I/O has completed.
|
2013-01-27 22:07:38 +08:00
|
|
|
*
|
|
|
|
* @map: Map to operate on.
|
|
|
|
*
|
|
|
|
* Blocks until any pending asynchronous I/O has completed. Returns
|
|
|
|
* an error code for any failed I/O operations.
|
|
|
|
*/
|
|
|
|
int regmap_async_complete(struct regmap *map)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/* Nothing to do with no async support */
|
2013-07-04 19:11:03 +08:00
|
|
|
if (!map->bus || !map->bus->async_write)
|
2013-01-27 22:07:38 +08:00
|
|
|
return 0;
|
|
|
|
|
2015-03-09 19:20:13 +08:00
|
|
|
trace_regmap_async_complete_start(map);
|
2013-02-22 03:05:48 +08:00
|
|
|
|
2013-01-27 22:07:38 +08:00
|
|
|
wait_event(map->async_waitq, regmap_async_is_done(map));
|
|
|
|
|
|
|
|
spin_lock_irqsave(&map->async_lock, flags);
|
|
|
|
ret = map->async_ret;
|
|
|
|
map->async_ret = 0;
|
|
|
|
spin_unlock_irqrestore(&map->async_lock, flags);
|
|
|
|
|
2015-03-09 19:20:13 +08:00
|
|
|
trace_regmap_async_complete_done(map);
|
2013-02-22 03:05:48 +08:00
|
|
|
|
2013-01-27 22:07:38 +08:00
|
|
|
return ret;
|
|
|
|
}
|
2013-02-05 21:53:26 +08:00
|
|
|
EXPORT_SYMBOL_GPL(regmap_async_complete);
|
2013-01-27 22:07:38 +08:00
|
|
|
|
2012-01-21 20:01:14 +08:00
|
|
|
/**
 * regmap_register_patch - Register and apply register updates to be applied
 *                         on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately.  Typically this is used to apply
 * corrections to the device defaults on startup, such as the updates
 * some vendors provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
	    num_regs))
		return 0;

	/*
	 * Grow the stored patch so repeated calls accumulate; the new
	 * entries are appended after any previously registered ones.
	 */
	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		/* On failure the old map->patch is still valid. */
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	/* Save and force cache bypass so the writes hit the hardware now. */
	bypass = map->cache_bypass;

	map->cache_bypass = true;
	/* Allow the writes to be issued asynchronously; waited on below. */
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	/* Wait for any async writes issued above to finish. */
	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);
|
|
|
|
|
2017-01-12 19:17:39 +08:00
|
|
|
/**
|
|
|
|
* regmap_get_val_bytes() - Report the size of a register value
|
|
|
|
*
|
|
|
|
* @map: Register map to operate on.
|
2012-02-18 06:20:14 +08:00
|
|
|
*
|
|
|
|
* Report the size of a register value, mainly intended to for use by
|
|
|
|
* generic infrastructure built on top of regmap.
|
|
|
|
*/
|
|
|
|
int regmap_get_val_bytes(struct regmap *map)
|
|
|
|
{
|
|
|
|
if (map->format.format_write)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return map->format.val_bytes;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
|
|
|
|
|
2015-05-22 00:42:43 +08:00
|
|
|
/**
|
2017-01-12 19:17:39 +08:00
|
|
|
* regmap_get_max_register() - Report the max register value
|
|
|
|
*
|
|
|
|
* @map: Register map to operate on.
|
2015-05-22 00:42:43 +08:00
|
|
|
*
|
|
|
|
* Report the max register value, mainly intended to for use by
|
|
|
|
* generic infrastructure built on top of regmap.
|
|
|
|
*/
|
|
|
|
int regmap_get_max_register(struct regmap *map)
|
|
|
|
{
|
|
|
|
return map->max_register ? map->max_register : -EINVAL;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(regmap_get_max_register);
|
|
|
|
|
2015-05-22 00:42:54 +08:00
|
|
|
/**
|
2017-01-12 19:17:39 +08:00
|
|
|
* regmap_get_reg_stride() - Report the register address stride
|
|
|
|
*
|
|
|
|
* @map: Register map to operate on.
|
2015-05-22 00:42:54 +08:00
|
|
|
*
|
|
|
|
* Report the register address stride, mainly intended to for use by
|
|
|
|
* generic infrastructure built on top of regmap.
|
|
|
|
*/
|
|
|
|
int regmap_get_reg_stride(struct regmap *map)
|
|
|
|
{
|
|
|
|
return map->reg_stride;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
|
|
|
|
|
2014-02-19 18:44:13 +08:00
|
|
|
int regmap_parse_val(struct regmap *map, const void *buf,
|
|
|
|
unsigned int *val)
|
|
|
|
{
|
|
|
|
if (!map->format.parse_val)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
*val = map->format.parse_val(buf);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(regmap_parse_val);
|
|
|
|
|
2011-07-21 05:56:53 +08:00
|
|
|
/*
 * One-time regmap bring-up, run at postcore initcall time so the
 * debugfs support is ready before most drivers create their maps.
 */
static int __init regmap_initcall(void)
{
	/* Presumably sets up the shared regmap debugfs root -- see
	 * regmap_debugfs_initcall() in the debugfs support code. */
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);
|