bus: mhi: ep: Add support for managing MHI state machine
Add support for managing the MHI state machine by controlling the state
transitions. Only the following MHI state transitions are supported:

1. Ready state
2. M0 state
3. M3 state
4. SYS_ERR state

Reviewed-by: Alex Elder <elder@linaro.org>
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Link: https://lore.kernel.org/r/20220405135754.6622-8-manivannan.sadhasivam@linaro.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit f9baa4f737
parent 961aeb6892
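For orientation before the diff: below is a minimal sketch (not part of this patch) of how a controller power-up path could chain the new state setters added in sm.c further down. The function name mhi_ep_power_up_sketch and its calling context are assumptions; only mhi_ep_set_ready_state() and mhi_ep_set_m0_state() come from this patch.

#include <linux/mhi_ep.h>
#include "internal.h"

/* Hypothetical power-up flow: RESET -> READY -> M0 using the new helpers */
static int mhi_ep_power_up_sketch(struct mhi_ep_cntrl *mhi_cntrl)
{
	int ret;

	/* READY is only reachable from RESET; fails with -EIO if the host has not reset MHISTATUS */
	ret = mhi_ep_set_ready_state(mhi_cntrl);
	if (ret)
		return ret;

	/*
	 * M0 is reachable from READY (or M3). Coming from READY, this also
	 * sends the M0 state change event and the AMSS EE event to the host.
	 */
	return mhi_ep_set_m0_state(mhi_cntrl);
}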
drivers/bus/mhi/ep/Makefile
@@ -1,2 +1,2 @@
 obj-$(CONFIG_MHI_BUS_EP) += mhi_ep.o
-mhi_ep-y := main.o mmio.o ring.o
+mhi_ep-y := main.o mmio.o ring.o sm.o
drivers/bus/mhi/ep/internal.h
@@ -146,6 +146,11 @@ struct mhi_ep_event {
 	struct mhi_ep_ring ring;
 };
 
+struct mhi_ep_state_transition {
+	struct list_head node;
+	enum mhi_state state;
+};
+
 struct mhi_ep_chan {
 	char *name;
 	struct mhi_ep_device *mhi_dev;
@@ -200,5 +205,11 @@ void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl);
 /* MHI EP core functions */
 int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state);
 int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env);
+bool mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state cur_mhi_state,
+			    enum mhi_state mhi_state);
+int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state);
+int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl);
+int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl);
+int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl);
 
 #endif
drivers/bus/mhi/ep/main.c
@@ -105,6 +105,43 @@ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_e
 	return mhi_ep_send_event(mhi_cntrl, 0, &event, 0);
 }
 
+static void mhi_ep_state_worker(struct work_struct *work)
+{
+	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work);
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	struct mhi_ep_state_transition *itr, *tmp;
+	unsigned long flags;
+	LIST_HEAD(head);
+	int ret;
+
+	spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
+	list_splice_tail_init(&mhi_cntrl->st_transition_list, &head);
+	spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);
+
+	list_for_each_entry_safe(itr, tmp, &head, node) {
+		list_del(&itr->node);
+		dev_dbg(dev, "Handling MHI state transition to %s\n",
+			mhi_state_str(itr->state));
+
+		switch (itr->state) {
+		case MHI_STATE_M0:
+			ret = mhi_ep_set_m0_state(mhi_cntrl);
+			if (ret)
+				dev_err(dev, "Failed to transition to M0 state\n");
+			break;
+		case MHI_STATE_M3:
+			ret = mhi_ep_set_m3_state(mhi_cntrl);
+			if (ret)
+				dev_err(dev, "Failed to transition to M3 state\n");
+			break;
+		default:
+			dev_err(dev, "Invalid MHI state transition: %d\n", itr->state);
+			break;
+		}
+		kfree(itr);
+	}
+}
+
 static void mhi_ep_release_device(struct device *dev)
 {
 	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
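mhi_ep_state_worker() above only drains st_transition_list; nothing in this patch queues entries onto it yet. Below is a minimal sketch of the producer side, assuming a later change calls it from the control interrupt path. The helper name and calling context are hypothetical; the item layout, list_lock, st_transition_list, wq and state_work all come from this patch.

#include <linux/slab.h>

/* Hypothetical producer: queue a transition and kick the state worker */
static int mhi_ep_queue_state_change_sketch(struct mhi_ep_cntrl *mhi_cntrl,
					    enum mhi_state state)
{
	struct mhi_ep_state_transition *item;
	unsigned long flags;

	/* GFP_ATOMIC: this would typically run from interrupt context */
	item = kzalloc(sizeof(*item), GFP_ATOMIC);
	if (!item)
		return -ENOMEM;

	item->state = state;

	spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
	list_add_tail(&item->node, &mhi_cntrl->st_transition_list);
	spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);

	/* state_work runs mhi_ep_state_worker() on the dedicated workqueue */
	queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work);

	return 0;
}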
@@ -314,6 +351,17 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
 		goto err_free_ch;
 	}
 
+	INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
+
+	mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
+	if (!mhi_cntrl->wq) {
+		ret = -ENOMEM;
+		goto err_free_cmd;
+	}
+
+	INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
+	spin_lock_init(&mhi_cntrl->state_lock);
+	spin_lock_init(&mhi_cntrl->list_lock);
 	mutex_init(&mhi_cntrl->event_lock);
 
 	/* Set MHI version and AMSS EE before enumeration */
@@ -323,7 +371,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
 	/* Set controller index */
 	ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL);
 	if (ret < 0)
-		goto err_free_cmd;
+		goto err_destroy_wq;
 
 	mhi_cntrl->index = ret;
 
@@ -351,6 +399,8 @@ err_put_dev:
 	put_device(&mhi_dev->dev);
 err_ida_free:
 	ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
+err_destroy_wq:
+	destroy_workqueue(mhi_cntrl->wq);
 err_free_cmd:
 	kfree(mhi_cntrl->mhi_cmd);
 err_free_ch:
@@ -364,6 +414,8 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
 {
 	struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev;
 
+	destroy_workqueue(mhi_cntrl->wq);
+
 	kfree(mhi_cntrl->mhi_cmd);
 	kfree(mhi_cntrl->mhi_chan);
 
drivers/bus/mhi/ep/sm.c (new file)
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/errno.h>
+#include <linux/mhi_ep.h>
+#include "internal.h"
+
+bool __must_check mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl,
+					 enum mhi_state cur_mhi_state,
+					 enum mhi_state mhi_state)
+{
+	if (mhi_state == MHI_STATE_SYS_ERR)
+		return true; /* Allowed in any state */
+
+	if (mhi_state == MHI_STATE_READY)
+		return cur_mhi_state == MHI_STATE_RESET;
+
+	if (mhi_state == MHI_STATE_M0)
+		return cur_mhi_state == MHI_STATE_M3 || cur_mhi_state == MHI_STATE_READY;
+
+	if (mhi_state == MHI_STATE_M3)
+		return cur_mhi_state == MHI_STATE_M0;
+
+	return false;
+}
+
+int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state)
+{
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+	if (!mhi_ep_check_mhi_state(mhi_cntrl, mhi_cntrl->mhi_state, mhi_state)) {
+		dev_err(dev, "MHI state change to %s from %s is not allowed!\n",
+			mhi_state_str(mhi_state),
+			mhi_state_str(mhi_cntrl->mhi_state));
+		return -EACCES;
+	}
+
+	/* TODO: Add support for M1 and M2 states */
+	if (mhi_state == MHI_STATE_M1 || mhi_state == MHI_STATE_M2) {
+		dev_err(dev, "MHI state (%s) not supported\n", mhi_state_str(mhi_state));
+		return -EOPNOTSUPP;
+	}
+
+	mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK, mhi_state);
+	mhi_cntrl->mhi_state = mhi_state;
+
+	if (mhi_state == MHI_STATE_READY)
+		mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK, 1);
+
+	if (mhi_state == MHI_STATE_SYS_ERR)
+		mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_SYSERR_MASK, 1);
+
+	return 0;
+}
+
+int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
+{
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	enum mhi_state old_state;
+	int ret;
+
+	spin_lock_bh(&mhi_cntrl->state_lock);
+	old_state = mhi_cntrl->mhi_state;
+
+	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+	spin_unlock_bh(&mhi_cntrl->state_lock);
+
+	if (ret)
+		return ret;
+
+	/* Signal host that the device moved to M0 */
+	ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0);
+	if (ret) {
+		dev_err(dev, "Failed sending M0 state change event\n");
+		return ret;
+	}
+
+	if (old_state == MHI_STATE_READY) {
+		/* Send AMSS EE event to host */
+		ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EE_AMSS);
+		if (ret) {
+			dev_err(dev, "Failed sending AMSS EE event\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
+{
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	int ret;
+
+	spin_lock_bh(&mhi_cntrl->state_lock);
+	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+	spin_unlock_bh(&mhi_cntrl->state_lock);
+
+	if (ret)
+		return ret;
+
+	/* Signal host that the device moved to M3 */
+	ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3);
+	if (ret) {
+		dev_err(dev, "Failed sending M3 state change event\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
+{
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	enum mhi_state mhi_state;
+	int ret, is_ready;
+
+	spin_lock_bh(&mhi_cntrl->state_lock);
+	/* Ensure that the MHISTATUS is set to RESET by host */
+	mhi_state = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK);
+	is_ready = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK);
+
+	if (mhi_state != MHI_STATE_RESET || is_ready) {
+		dev_err(dev, "READY state transition failed. MHI host not in RESET state\n");
+		spin_unlock_bh(&mhi_cntrl->state_lock);
+		return -EIO;
+	}
+
+	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY);
+	spin_unlock_bh(&mhi_cntrl->state_lock);
+
+	return ret;
+}
include/linux/mhi_ep.h
@@ -67,6 +67,11 @@ struct mhi_ep_db_info {
  * @cmd_ctx_host_pa: Physical address of host command context data structure
  * @chdb: Array of channel doorbell interrupt info
  * @event_lock: Lock for protecting event rings
+ * @list_lock: Lock for protecting state transition and channel doorbell lists
+ * @state_lock: Lock for protecting state transitions
+ * @st_transition_list: List of state transitions
+ * @wq: Dedicated workqueue for handling rings and state changes
+ * @state_work: State transition worker
  * @raise_irq: CB function for raising IRQ to the host
  * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it
  * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context
@@ -100,6 +105,13 @@ struct mhi_ep_cntrl {
 
 	struct mhi_ep_db_info chdb[4];
 	struct mutex event_lock;
+	spinlock_t list_lock;
+	spinlock_t state_lock;
+
+	struct list_head st_transition_list;
+
+	struct workqueue_struct *wq;
+	struct work_struct state_work;
 
 	void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector);
 	int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr,