// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
 *
 * Simple multiplexer clock implementation
 */

#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>

/*
 * DOC: basic adjustable multiplexer clock that cannot gate
 *
 * Traits of this clock:
 * prepare - clk_prepare only ensures that parents are prepared
 * enable - clk_enable only ensures that parents are enabled
 * rate - rate is only affected by parent switching. No clk_set_rate support
 * parent - parent is adjustable through clk_set_parent
 */
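
/*
 * Illustration only (hypothetical field layout, not taken from this file):
 * with shift = 4 and mask = 0x3 the mux lives in bits [5:4] of the
 * register. Without a remap table the field value is the parent index,
 * so parent 2 is selected by programming 0x2 into that field; with a
 * table such as { 0x1, 0x2, 0x4, 0x8 } the same parent is selected by
 * programming table[2] = 0x4.
 */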

static inline u32 clk_mux_readl(struct clk_mux *mux)
{
	if (mux->flags & CLK_MUX_BIG_ENDIAN)
		return ioread32be(mux->reg);

	return readl(mux->reg);
}

static inline void clk_mux_writel(struct clk_mux *mux, u32 val)
{
	if (mux->flags & CLK_MUX_BIG_ENDIAN)
		iowrite32be(val, mux->reg);
	else
		writel(val, mux->reg);
}

int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
			 unsigned int val)
{
	int num_parents = clk_hw_get_num_parents(hw);

	if (table) {
		int i;

		for (i = 0; i < num_parents; i++)
			if (table[i] == val)
				return i;
		return -EINVAL;
	}

	if (val && (flags & CLK_MUX_INDEX_BIT))
		val = ffs(val) - 1;

	if (val && (flags & CLK_MUX_INDEX_ONE))
		val--;

	if (val >= num_parents)
		return -EINVAL;

	return val;
}
EXPORT_SYMBOL_GPL(clk_mux_val_to_index);

unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index)
{
	unsigned int val = index;

	if (table) {
		val = table[index];
	} else {
		if (flags & CLK_MUX_INDEX_BIT)
			val = 1 << index;

		if (flags & CLK_MUX_INDEX_ONE)
			val++;
	}

	return val;
}
EXPORT_SYMBOL_GPL(clk_mux_index_to_val);
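
/*
 * Worked example for the flag-based mappings above (illustrative values
 * only): with CLK_MUX_INDEX_BIT, parent index 2 is programmed as
 * 1 << 2 = 0x4; with CLK_MUX_INDEX_ONE, index 2 is programmed as 3.
 * clk_mux_val_to_index() applies the inverse mapping on readback.
 */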

static u8 clk_mux_get_parent(struct clk_hw *hw)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 val;

	val = clk_mux_readl(mux) >> mux->shift;
	val &= mux->mask;

	return clk_mux_val_to_index(hw, mux->table, mux->flags, val);
}

static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
	unsigned long flags = 0;
	u32 reg;

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);
	else
		__acquire(mux->lock);

	if (mux->flags & CLK_MUX_HIWORD_MASK) {
		reg = mux->mask << (mux->shift + 16);
	} else {
		reg = clk_mux_readl(mux);
		reg &= ~(mux->mask << mux->shift);
	}
	val = val << mux->shift;
	reg |= val;
	clk_mux_writel(mux, reg);

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);
	else
		__release(mux->lock);

	return 0;
}
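
/*
 * Note on CLK_MUX_HIWORD_MASK above: registers of this style take a
 * write-enable mask in the upper 16 bits, so the field can be updated
 * without a read-modify-write. Purely illustrative numbers: for
 * shift = 4 and mask = 0x3, programming value 0x2 writes
 * (0x3 << 20) | (0x2 << 4).
 */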

static int clk_mux_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	struct clk_mux *mux = to_clk_mux(hw);

	return clk_mux_determine_rate_flags(hw, req, mux->flags);
}

const struct clk_ops clk_mux_ops = {
	.get_parent = clk_mux_get_parent,
	.set_parent = clk_mux_set_parent,
	.determine_rate = clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);

const struct clk_ops clk_mux_ro_ops = {
	.get_parent = clk_mux_get_parent,
};
EXPORT_SYMBOL_GPL(clk_mux_ro_ops);

struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
		const char *name, u8 num_parents,
		const char * const *parent_names,
		const struct clk_hw **parent_hws,
		const struct clk_parent_data *parent_data,
		unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
		u8 clk_mux_flags, u32 *table, spinlock_t *lock)
{
	struct clk_mux *mux;
	struct clk_hw *hw;
	struct clk_init_data init = {};
	u8 width = 0;
	int ret = -EINVAL;

	if (clk_mux_flags & CLK_MUX_HIWORD_MASK) {
		width = fls(mask) - ffs(mask) + 1;
		if (width + shift > 16) {
			pr_err("mux value exceeds LOWORD field\n");
			return ERR_PTR(-EINVAL);
		}
	}

	/* allocate the mux */
	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (clk_mux_flags & CLK_MUX_READ_ONLY)
		init.ops = &clk_mux_ro_ops;
	else
		init.ops = &clk_mux_ops;
	init.flags = flags;
	init.parent_names = parent_names;
	init.parent_data = parent_data;
	init.parent_hws = parent_hws;
	init.num_parents = num_parents;

	/* struct clk_mux assignments */
	mux->reg = reg;
	mux->shift = shift;
	mux->mask = mask;
	mux->flags = clk_mux_flags;
	mux->lock = lock;
	mux->table = table;
	mux->hw.init = &init;

	hw = &mux->hw;
	if (dev || !np)
		ret = clk_hw_register(dev, hw);
	else if (np)
		ret = of_clk_hw_register(np, hw);
	if (ret) {
		kfree(mux);
		hw = ERR_PTR(ret);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(__clk_hw_register_mux);
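
/*
 * Callers normally go through the clk_hw_register_mux*() wrappers rather
 * than this function directly. A minimal sketch with made-up names and a
 * made-up register layout (2-bit field at bits [1:0], four parents,
 * protected by the caller's spinlock):
 *
 *	static const char * const sel_parents[] = {
 *		"osc", "pll1", "pll2", "pll3",
 *	};
 *
 *	hw = clk_hw_register_mux_table(dev, "cpu_sel", sel_parents,
 *				       ARRAY_SIZE(sel_parents), 0,
 *				       base + 0x10, 0, 0x3, 0, NULL, &lock);
 */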

struct clk *clk_register_mux_table(struct device *dev, const char *name,
		const char * const *parent_names, u8 num_parents,
		unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
		u8 clk_mux_flags, u32 *table, spinlock_t *lock)
{
	struct clk_hw *hw;

	hw = clk_hw_register_mux_table(dev, name, parent_names,
				       num_parents, flags, reg, shift, mask,
				       clk_mux_flags, table, lock);
	if (IS_ERR(hw))
		return ERR_CAST(hw);
	return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_mux_table);

void clk_unregister_mux(struct clk *clk)
{
	struct clk_mux *mux;
	struct clk_hw *hw;

	hw = __clk_get_hw(clk);
	if (!hw)
		return;

	mux = to_clk_mux(hw);

	clk_unregister(clk);
	kfree(mux);
}
EXPORT_SYMBOL_GPL(clk_unregister_mux);

void clk_hw_unregister_mux(struct clk_hw *hw)
{
	struct clk_mux *mux;

	mux = to_clk_mux(hw);

	clk_hw_unregister(hw);
	kfree(mux);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_mux);