// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Reset driver for the StarFive JH71X0 SoCs
 *
 * Copyright (C) 2021 Emil Renner Berthing <kernel@esmil.dk>
 */

#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/reset-controller.h>
#include <linux/spinlock.h>

#include "reset-starfive-jh71x0.h"

struct jh71x0_reset {
|
2023-04-01 19:19:21 +08:00
|
|
|
struct reset_controller_dev rcdev;
|
|
|
|
/* protect registers against concurrent read-modify-write */
|
|
|
|
spinlock_t lock;
|
2023-04-01 19:19:22 +08:00
|
|
|
void __iomem *assert;
|
|
|
|
void __iomem *status;
|
2023-04-01 19:19:24 +08:00
|
|
|
const u32 *asserted;
|
2023-04-01 19:19:21 +08:00
|
|
|
};
|
|
|
|
|
2023-04-01 19:19:23 +08:00
|
|
|
static inline struct jh71x0_reset *
|
|
|
|
jh71x0_reset_from(struct reset_controller_dev *rcdev)
|
2023-04-01 19:19:21 +08:00
|
|
|
{
|
2023-04-01 19:19:23 +08:00
|
|
|
return container_of(rcdev, struct jh71x0_reset, rcdev);
|
2023-04-01 19:19:21 +08:00
|
|
|
}
|
|
|
|
|
2023-04-01 19:19:23 +08:00
|
|
|
/*
 * jh71x0_reset_update - assert or deassert one reset line and wait for it
 * @rcdev:  reset controller device
 * @id:     reset line index (bit id%32 of register word id/32)
 * @assert: true to assert the reset, false to deassert it
 *
 * Performs a locked read-modify-write of the ASSERT register, then polls
 * the matching STATUS bit until it reaches the expected value.
 *
 * Return: 0 on success, -ETIMEDOUT if the status bit never settles.
 */
static int jh71x0_reset_update(struct reset_controller_dev *rcdev,
			       unsigned long id, bool assert)
{
	struct jh71x0_reset *data = jh71x0_reset_from(rcdev);
	unsigned long offset = id / 32;
	u32 mask = BIT(id % 32);
	void __iomem *reg_assert = data->assert + offset * sizeof(u32);
	void __iomem *reg_status = data->status + offset * sizeof(u32);
	/* expected STATUS bit value when asserted; 0 if no polarity map */
	u32 done = data->asserted ? data->asserted[offset] & mask : 0;
	u32 value;
	unsigned long flags;
	int ret;

	/* deasserting expects the opposite status polarity */
	if (!assert)
		done ^= mask;

	spin_lock_irqsave(&data->lock, flags);

	value = readl(reg_assert);
	if (assert)
		value |= mask;
	else
		value &= ~mask;
	writel(value, reg_assert);

	/* if the associated clock is gated, deasserting might otherwise hang forever */
	ret = readl_poll_timeout_atomic(reg_status, value, (value & mask) == done, 0, 1000);

	spin_unlock_irqrestore(&data->lock, flags);

	return ret;
}

static int jh71x0_reset_assert(struct reset_controller_dev *rcdev,
|
2023-04-01 19:19:21 +08:00
|
|
|
unsigned long id)
|
|
|
|
{
|
2023-04-01 19:19:23 +08:00
|
|
|
return jh71x0_reset_update(rcdev, id, true);
|
2023-04-01 19:19:21 +08:00
|
|
|
}
|
|
|
|
|
2023-04-01 19:19:23 +08:00
|
|
|
static int jh71x0_reset_deassert(struct reset_controller_dev *rcdev,
|
2023-04-01 19:19:21 +08:00
|
|
|
unsigned long id)
|
|
|
|
{
|
2023-04-01 19:19:23 +08:00
|
|
|
return jh71x0_reset_update(rcdev, id, false);
|
2023-04-01 19:19:21 +08:00
|
|
|
}
|
|
|
|
|
2023-04-01 19:19:23 +08:00
|
|
|
/*
 * reset_control_ops .reset callback: pulse line @id by asserting and
 * then deasserting it, propagating the first failure.
 */
static int jh71x0_reset_reset(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	int ret;

	ret = jh71x0_reset_assert(rcdev, id);
	if (ret)
		return ret;

	return jh71x0_reset_deassert(rcdev, id);
}

static int jh71x0_reset_status(struct reset_controller_dev *rcdev,
|
2023-04-01 19:19:21 +08:00
|
|
|
unsigned long id)
|
|
|
|
{
|
2023-04-01 19:19:23 +08:00
|
|
|
struct jh71x0_reset *data = jh71x0_reset_from(rcdev);
|
2023-04-01 19:19:24 +08:00
|
|
|
unsigned long offset = id / 32;
|
|
|
|
u32 mask = BIT(id % 32);
|
|
|
|
void __iomem *reg_status = data->status + offset * sizeof(u32);
|
|
|
|
u32 value = readl(reg_status);
|
2023-04-01 19:19:21 +08:00
|
|
|
|
2023-04-01 19:19:22 +08:00
|
|
|
return !((value ^ data->asserted[offset]) & mask);
|
2023-04-01 19:19:21 +08:00
|
|
|
}
|
|
|
|
|
2023-04-01 19:19:23 +08:00
|
|
|
static const struct reset_control_ops jh71x0_reset_ops = {
|
|
|
|
.assert = jh71x0_reset_assert,
|
|
|
|
.deassert = jh71x0_reset_deassert,
|
|
|
|
.reset = jh71x0_reset_reset,
|
|
|
|
.status = jh71x0_reset_status,
|
2023-04-01 19:19:21 +08:00
|
|
|
};
|
|
|
|
|
2023-04-01 19:19:23 +08:00
|
|
|
/*
 * reset_starfive_jh71x0_register - register a JH71X0 reset controller
 * @dev:       device providing the controller; owns all allocations
 *             (devm-managed, nothing to free on the caller's side)
 * @of_node:   device tree node to associate with the controller
 * @assert:    mapped base of the ASSERT register bank
 * @status:    mapped base of the STATUS register bank
 * @asserted:  optional per-word polarity map of the STATUS bits when
 *             asserted; may be NULL (then 0 means asserted)
 * @nr_resets: number of reset lines handled by this controller
 * @owner:     module owning the reset controller
 *
 * Return: 0 on success, -ENOMEM or the reset-core registration error.
 */
int reset_starfive_jh71x0_register(struct device *dev, struct device_node *of_node,
				   void __iomem *assert, void __iomem *status,
				   const u32 *asserted, unsigned int nr_resets,
				   struct module *owner)
{
	struct jh71x0_reset *data;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->rcdev.ops = &jh71x0_reset_ops;
	data->rcdev.owner = owner;
	data->rcdev.nr_resets = nr_resets;
	data->rcdev.dev = dev;
	data->rcdev.of_node = of_node;

	spin_lock_init(&data->lock);
	data->assert = assert;
	data->status = status;
	data->asserted = asserted;

	return devm_reset_controller_register(dev, &data->rcdev);
}
EXPORT_SYMBOL_GPL(reset_starfive_jh71x0_register);