/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2022, Linaro Ltd.
 *
 */
#ifndef _MHI_EP_H_
#define _MHI_EP_H_

#include <linux/dma-direction.h>
#include <linux/mhi.h>

#define MHI_EP_DEFAULT_MTU 0x8000

/**
 * struct mhi_ep_channel_config - Channel configuration structure for controller
 * @name: The name of this channel
 * @num: The number assigned to this channel
 * @num_elements: The number of elements that can be queued to this channel
 * @dir: Direction that data may flow on this channel
 */
struct mhi_ep_channel_config {
	char *name;
	u32 num;
	u32 num_elements;
	enum dma_data_direction dir;
};
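
/*
 * Example (illustrative only): a hypothetical pair of channel definitions for
 * a loopback-style client. The channel name, numbers and element counts are
 * assumptions, not mandated by this header.
 *
 *	static const struct mhi_ep_channel_config mhi_hw_channels[] = {
 *		{
 *			.name = "LOOPBACK",
 *			.num = 0,
 *			.num_elements = 64,
 *			.dir = DMA_TO_DEVICE,
 *		},
 *		{
 *			.name = "LOOPBACK",
 *			.num = 1,
 *			.num_elements = 64,
 *			.dir = DMA_FROM_DEVICE,
 *		},
 *	};
 */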

/**
 * struct mhi_ep_cntrl_config - MHI Endpoint controller configuration
 * @mhi_version: MHI spec version supported by the controller
 * @max_channels: Maximum number of channels supported
 * @num_channels: Number of channels defined in @ch_cfg
 * @ch_cfg: Array of defined channels
 */
struct mhi_ep_cntrl_config {
	u32 mhi_version;
	u32 max_channels;
	u32 num_channels;
	const struct mhi_ep_channel_config *ch_cfg;
};
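
/*
 * Example (illustrative only): a controller configuration built around the
 * hypothetical channel array sketched above. The MHI version and maximum
 * channel count are assumptions for illustration.
 *
 *	static const struct mhi_ep_cntrl_config mhi_ep_config = {
 *		.mhi_version = 0x1000600,
 *		.max_channels = 128,
 *		.num_channels = ARRAY_SIZE(mhi_hw_channels),
 *		.ch_cfg = mhi_hw_channels,
 *	};
 */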

/**
 * struct mhi_ep_db_info - MHI Endpoint doorbell info
 * @mask: Mask of the doorbell interrupt
 * @status: Status of the doorbell interrupt
 */
struct mhi_ep_db_info {
	u32 mask;
	u32 status;
};

/**
 * struct mhi_ep_cntrl - MHI Endpoint controller structure
 * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
 *             Endpoint controller
 * @mhi_dev: MHI Endpoint device instance for the controller
 * @mmio: MMIO region containing the MHI registers
 * @mhi_chan: Points to the channel configuration table
 * @mhi_event: Points to the event ring configurations table
 * @mhi_cmd: Points to the command ring configurations table
 * @sm: MHI Endpoint state machine
 * @ch_ctx_cache: Cache of host channel context data structure
 * @ev_ctx_cache: Cache of host event context data structure
 * @cmd_ctx_cache: Cache of host command context data structure
 * @ch_ctx_host_pa: Physical address of host channel context data structure
 * @ev_ctx_host_pa: Physical address of host event context data structure
 * @cmd_ctx_host_pa: Physical address of host command context data structure
 * @ch_ctx_cache_phys: Physical address of the host channel context cache
 * @ev_ctx_cache_phys: Physical address of the host event context cache
 * @cmd_ctx_cache_phys: Physical address of the host command context cache
 * @chdb: Array of channel doorbell interrupt info
 * @event_lock: Lock for protecting event rings
 * @list_lock: Lock for protecting state transition and channel doorbell lists
 * @state_lock: Lock for protecting state transitions
 * @st_transition_list: List of state transitions
 * @ch_db_list: List of queued channel doorbells
 * @wq: Dedicated workqueue for handling rings and state changes
 * @state_work: State transition worker
 * @reset_work: Worker for MHI Endpoint reset
 * @cmd_ring_work: Worker for processing command rings
 * @ch_ring_work: Worker for processing channel rings
 * @raise_irq: CB function for raising IRQ to the host
 * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it
 * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context
 * @read_from_host: CB function for reading from host memory from endpoint
 * @write_to_host: CB function for writing to host memory from endpoint
 * @mhi_state: MHI Endpoint state
 * @max_chan: Maximum channels supported by the endpoint controller
 * @mru: MRU (Maximum Receive Unit) value of the endpoint controller
 * @event_rings: Number of event rings supported by the endpoint controller
 * @hw_event_rings: Number of hardware event rings supported by the endpoint controller
 * @chdb_offset: Channel doorbell offset set by the host
 * @erdb_offset: Event ring doorbell offset set by the host
 * @index: MHI Endpoint controller index
 * @irq: IRQ used by the endpoint controller
 * @enabled: Check if the endpoint controller is enabled or not
 */
struct mhi_ep_cntrl {
	struct device *cntrl_dev;
	struct mhi_ep_device *mhi_dev;
	void __iomem *mmio;

	struct mhi_ep_chan *mhi_chan;
	struct mhi_ep_event *mhi_event;
	struct mhi_ep_cmd *mhi_cmd;
	struct mhi_ep_sm *sm;

	struct mhi_chan_ctxt *ch_ctx_cache;
	struct mhi_event_ctxt *ev_ctx_cache;
	struct mhi_cmd_ctxt *cmd_ctx_cache;
	u64 ch_ctx_host_pa;
	u64 ev_ctx_host_pa;
	u64 cmd_ctx_host_pa;
	phys_addr_t ch_ctx_cache_phys;
	phys_addr_t ev_ctx_cache_phys;
	phys_addr_t cmd_ctx_cache_phys;

	struct mhi_ep_db_info chdb[4];
	struct mutex event_lock;
	spinlock_t list_lock;
	spinlock_t state_lock;

	struct list_head st_transition_list;
	struct list_head ch_db_list;

	struct workqueue_struct *wq;
	struct work_struct state_work;
	struct work_struct reset_work;
	struct work_struct cmd_ring_work;
	struct work_struct ch_ring_work;

	void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector);
	int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr,
			 void __iomem **virt, size_t size);
	void (*unmap_free)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t phys,
			   void __iomem *virt, size_t size);
	int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl, u64 from, void *to, size_t size);
	int (*write_to_host)(struct mhi_ep_cntrl *mhi_cntrl, void *from, u64 to, size_t size);

	enum mhi_state mhi_state;

	u32 max_chan;
	u32 mru;
	u32 event_rings;
	u32 hw_event_rings;
	u32 chdb_offset;
	u32 erdb_offset;
	u32 index;
	int irq;
	bool enabled;
};
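
/*
 * Example (illustrative only): a minimal sketch of how a bus-specific glue
 * driver (e.g. a PCIe endpoint function driver) might wire up the mandatory
 * callbacks before registering the controller. The epf_mhi_* helpers named
 * here are hypothetical.
 *
 *	mhi_cntrl->cntrl_dev = dev;
 *	mhi_cntrl->mmio = mmio_base;
 *	mhi_cntrl->irq = irq;
 *	mhi_cntrl->raise_irq = epf_mhi_raise_irq;
 *	mhi_cntrl->alloc_map = epf_mhi_alloc_map;
 *	mhi_cntrl->unmap_free = epf_mhi_unmap_free;
 *	mhi_cntrl->read_from_host = epf_mhi_read_from_host;
 *	mhi_cntrl->write_to_host = epf_mhi_write_to_host;
 */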

/**
 * struct mhi_ep_device - Structure representing an MHI Endpoint device that binds
 *                        to channels or is associated with controllers
 * @dev: Driver model device node for the MHI Endpoint device
 * @mhi_cntrl: Controller the device belongs to
 * @id: Pointer to MHI Endpoint device ID struct
 * @name: Name of the associated MHI Endpoint device
 * @ul_chan: UL (from host to endpoint) channel for the device
 * @dl_chan: DL (from endpoint to host) channel for the device
 * @dev_type: MHI device type
 */
struct mhi_ep_device {
	struct device dev;
	struct mhi_ep_cntrl *mhi_cntrl;
	const struct mhi_device_id *id;
	const char *name;
	struct mhi_ep_chan *ul_chan;
	struct mhi_ep_chan *dl_chan;
	enum mhi_device_type dev_type;
};

/**
 * struct mhi_ep_driver - Structure representing an MHI Endpoint client driver
 * @id_table: Pointer to MHI Endpoint device ID table
 * @driver: Device driver model driver
 * @probe: CB function for client driver probe function
 * @remove: CB function for client driver remove function
 * @ul_xfer_cb: CB function for UL (from host to endpoint) data transfer
 * @dl_xfer_cb: CB function for DL (from endpoint to host) data transfer
 */
struct mhi_ep_driver {
	const struct mhi_device_id *id_table;
	struct device_driver driver;
	int (*probe)(struct mhi_ep_device *mhi_ep,
		     const struct mhi_device_id *id);
	void (*remove)(struct mhi_ep_device *mhi_ep);
	void (*ul_xfer_cb)(struct mhi_ep_device *mhi_dev,
			   struct mhi_result *result);
	void (*dl_xfer_cb)(struct mhi_ep_device *mhi_dev,
			   struct mhi_result *result);
};
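
/*
 * Example (illustrative only): skeleton of a hypothetical "loopback" client
 * driver. The channel name, function names and driver name are assumptions;
 * only the structure layout comes from this header.
 *
 *	static const struct mhi_device_id mhi_ep_loopback_id_table[] = {
 *		{ .chan = "LOOPBACK" },
 *		{},
 *	};
 *
 *	static struct mhi_ep_driver mhi_ep_loopback_driver = {
 *		.id_table = mhi_ep_loopback_id_table,
 *		.probe = mhi_ep_loopback_probe,
 *		.remove = mhi_ep_loopback_remove,
 *		.ul_xfer_cb = mhi_ep_loopback_ul_cb,
 *		.dl_xfer_cb = mhi_ep_loopback_dl_cb,
 *		.driver = {
 *			.name = "mhi_ep_loopback",
 *		},
 *	};
 */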

#define to_mhi_ep_device(dev) container_of(dev, struct mhi_ep_device, dev)
#define to_mhi_ep_driver(drv) container_of(drv, struct mhi_ep_driver, driver)

/*
 * module_mhi_ep_driver() - Helper macro for drivers that don't do
 * anything special other than using default mhi_ep_driver_register() and
 * mhi_ep_driver_unregister(). This eliminates a lot of boilerplate.
 * Each module may only use this macro once.
 */
#define module_mhi_ep_driver(mhi_drv) \
	module_driver(mhi_drv, mhi_ep_driver_register, \
		      mhi_ep_driver_unregister)
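
/*
 * Example (illustrative only), continuing the hypothetical loopback client
 * driver sketched above:
 *
 *	module_mhi_ep_driver(mhi_ep_loopback_driver);
 */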

/*
 * Macro to avoid include chaining to get THIS_MODULE
 */
#define mhi_ep_driver_register(mhi_drv) \
	__mhi_ep_driver_register(mhi_drv, THIS_MODULE)

/**
 * __mhi_ep_driver_register - Register a driver with MHI Endpoint bus
 * @mhi_drv: Driver to be associated with the device
 * @owner: The module owner
 *
 * Return: 0 if driver registration succeeds, a negative error code otherwise.
 */
int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner);

/**
 * mhi_ep_driver_unregister - Unregister a driver from MHI Endpoint bus
 * @mhi_drv: Driver associated with the device
 */
void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv);

/**
 * mhi_ep_register_controller - Register MHI Endpoint controller
 * @mhi_cntrl: MHI Endpoint controller to register
 * @config: Configuration to use for the controller
 *
 * Return: 0 if controller registration succeeds, a negative error code otherwise.
 */
int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
			       const struct mhi_ep_cntrl_config *config);
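
/*
 * Example (illustrative only): registering a controller with the hypothetical
 * configuration sketched earlier in this header. Error handling beyond the
 * return check is elided.
 *
 *	ret = mhi_ep_register_controller(mhi_cntrl, &mhi_ep_config);
 *	if (ret)
 *		return ret;
 */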

/**
 * mhi_ep_unregister_controller - Unregister MHI Endpoint controller
 * @mhi_cntrl: MHI Endpoint controller to unregister
 */
void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl);

/**
 * mhi_ep_power_up - Power up the MHI endpoint stack
 * @mhi_cntrl: MHI Endpoint controller
 *
 * Return: 0 if power up succeeds, a negative error code otherwise.
 */
int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl);
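
/*
 * Example (illustrative only): a glue driver would typically power up the
 * stack once the physical link to the host is ready; the exact trigger is
 * bus specific and is an assumption here.
 *
 *	ret = mhi_ep_power_up(mhi_cntrl);
 *	if (ret)
 *		dev_err(mhi_cntrl->cntrl_dev, "Failed to power up MHI EP stack: %d\n", ret);
 */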

/**
 * mhi_ep_power_down - Power down the MHI endpoint stack
 * @mhi_cntrl: MHI Endpoint controller
 */
void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl);

/**
 * mhi_ep_queue_is_empty - Determine whether the transfer queue is empty
 * @mhi_dev: Device associated with the channels
 * @dir: DMA direction for the channel
 *
 * Return: true if the queue is empty, false otherwise.
 */
bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir);
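
/*
 * Example (illustrative only): a client could check whether the DL transfer
 * queue has drained before producing more data. Treating DMA_FROM_DEVICE as
 * the DL direction here is an assumption.
 *
 *	if (!mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE))
 *		return -EAGAIN;
 */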

/**
 * mhi_ep_queue_skb - Send SKBs to host over MHI Endpoint
 * @mhi_dev: Device associated with the DL channel
 * @skb: SKB to be queued
 *
 * Return: 0 if the SKB has been sent successfully, a negative error code otherwise.
 */
int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb);
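
/*
 * Example (illustrative only): queueing a DL buffer towards the host from a
 * client driver. The skb is assumed to have been allocated and filled by the
 * caller.
 *
 *	ret = mhi_ep_queue_skb(mhi_dev, skb);
 *	if (ret) {
 *		dev_err(&mhi_dev->dev, "Failed to queue SKB: %d\n", ret);
 *		dev_kfree_skb_any(skb);
 *	}
 */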

#endif