usb: xhci: Make some static functions global
This patch makes some static functions global to avoid duplicating them across
different files. These functions can then be used in the implementation of the
xHCI debug capability. There is no functional change.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 67d2ea9fde
parent 103afda0e6
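To show why these helpers are worth exporting, here is a minimal usage sketch, assuming a debug-capability-style consumer that can see the new prototypes in xhci.h. The function name xhci_dbc_mem_example, the one-segment ring, cycle state 1, and max_packet 0 are illustrative assumptions, not taken from this patch.

/* Hypothetical consumer sketch -- not part of this patch. */
static int xhci_dbc_mem_example(struct xhci_hcd *xhci)
{
	struct xhci_ring	*evt_ring;
	struct xhci_erst	erst;
	int			ret;

	/* Allocate a one-segment event ring (assumed parameters). */
	evt_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_EVENT, 0, GFP_KERNEL);
	if (!evt_ring)
		return -ENOMEM;

	/* Build an event ring segment table describing the ring's segments. */
	ret = xhci_alloc_erst(xhci, evt_ring, &erst, GFP_KERNEL);
	if (ret) {
		xhci_ring_free(xhci, evt_ring);
		return ret;
	}

	/* ... program ERSTSZ/ERSTBA/ERDP from 'erst' and use the ring ... */

	/* Teardown mirrors allocation. */
	xhci_free_erst(xhci, &erst);
	xhci_ring_free(xhci, evt_ring);
	return 0;
}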
drivers/usb/host/xhci-mem.c

@@ -357,7 +357,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
  * Set the end flag and the cycle toggle bit on the last segment.
  * See section 4.9.1 and figures 15 and 16.
  */
-static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		unsigned int num_segs, unsigned int cycle_state,
 		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
 {
@@ -454,7 +454,7 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	return 0;
 }
 
-static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
+struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
 						    int type, gfp_t flags)
 {
 	struct xhci_container_ctx *ctx;
@@ -479,7 +479,7 @@ static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
 	return ctx;
 }
 
-static void xhci_free_container_ctx(struct xhci_hcd *xhci,
+void xhci_free_container_ctx(struct xhci_hcd *xhci,
 			     struct xhci_container_ctx *ctx)
 {
 	if (!ctx)
@@ -1757,21 +1757,61 @@ void xhci_free_command(struct xhci_hcd *xhci,
 	kfree(command);
 }
 
+int xhci_alloc_erst(struct xhci_hcd *xhci,
+		    struct xhci_ring *evt_ring,
+		    struct xhci_erst *erst,
+		    gfp_t flags)
+{
+	size_t size;
+	unsigned int val;
+	struct xhci_segment *seg;
+	struct xhci_erst_entry *entry;
+
+	size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
+	erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
+					   size,
+					   &erst->erst_dma_addr,
+					   flags);
+	if (!erst->entries)
+		return -ENOMEM;
+
+	memset(erst->entries, 0, size);
+	erst->num_entries = evt_ring->num_segs;
+
+	seg = evt_ring->first_seg;
+	for (val = 0; val < evt_ring->num_segs; val++) {
+		entry = &erst->entries[val];
+		entry->seg_addr = cpu_to_le64(seg->dma);
+		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
+		entry->rsvd = 0;
+		seg = seg->next;
+	}
+
+	return 0;
+}
+
+void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
+{
+	size_t size;
+	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+
+	size = sizeof(struct xhci_erst_entry) * (erst->num_entries);
+	if (erst->entries)
+		dma_free_coherent(dev, size,
+				  erst->entries,
+				  erst->erst_dma_addr);
+	erst->entries = NULL;
+}
+
 void xhci_mem_cleanup(struct xhci_hcd *xhci)
 {
 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
-	int size;
 	int i, j, num_ports;
 
 	cancel_delayed_work_sync(&xhci->cmd_timer);
 
 	/* Free the Event Ring Segment Table and the actual Event Ring */
-	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
-	if (xhci->erst.entries)
-		dma_free_coherent(dev, size,
-				xhci->erst.entries, xhci->erst.erst_dma_addr);
-	xhci->erst.entries = NULL;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
+	xhci_free_erst(xhci, &xhci->erst);
 
 	if (xhci->event_ring)
 		xhci_ring_free(xhci, xhci->event_ring);
 	xhci->event_ring = NULL;
@@ -2308,9 +2348,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
 	unsigned int val, val2;
 	u64 val_64;
-	struct xhci_segment *seg;
-	u32 page_size, temp;
-	int i;
+	u32 page_size, temp;
+	int i, ret;
 
 	INIT_LIST_HEAD(&xhci->cmd_list);
 
@@ -2449,32 +2488,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	if (xhci_check_trb_in_td_math(xhci) < 0)
 		goto fail;
 
-	xhci->erst.entries = dma_alloc_coherent(dev,
-			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
-			flags);
-	if (!xhci->erst.entries)
+	ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
+	if (ret)
 		goto fail;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Allocated event ring segment table at 0x%llx",
-			(unsigned long long)dma);
-
-	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
-	xhci->erst.num_entries = ERST_NUM_SEGS;
-	xhci->erst.erst_dma_addr = dma;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-		"Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
-			xhci->erst.num_entries,
-			xhci->erst.entries,
-			(unsigned long long)xhci->erst.erst_dma_addr);
 
-	/* set ring base address and size for each segment table entry */
-	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
-		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
-		entry->seg_addr = cpu_to_le64(seg->dma);
-		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
-		entry->rsvd = 0;
-		seg = seg->next;
-	}
-
 	/* set ERST count with the number of entries in the segment table */
 	val = readl(&xhci->ir_set->erst_size);
drivers/usb/host/xhci-ring.c

@@ -153,7 +153,7 @@ static void next_trb(struct xhci_hcd *xhci,
  * See Cycle bit rules. SW is the consumer for the event ring only.
  * Don't make a ring full of link TRBs. That would be dumb and this would loop.
  */
-static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
+void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
 {
 	/* event ring doesn't have link trbs, check for last trb */
 	if (ring->type == TYPE_EVENT) {
@@ -2957,7 +2957,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 	return 0;
 }
 
-static unsigned int count_trbs(u64 addr, u64 len)
+unsigned int count_trbs(u64 addr, u64 len)
 {
 	unsigned int num_trbs;
 
drivers/usb/host/xhci.h

@@ -1965,9 +1965,17 @@ void xhci_slot_copy(struct xhci_hcd *xhci,
 int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
 		struct usb_device *udev, struct usb_host_endpoint *ep,
 		gfp_t mem_flags);
+struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+		unsigned int num_segs, unsigned int cycle_state,
+		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags);
 void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
 int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
-				unsigned int num_trbs, gfp_t flags);
+		unsigned int num_trbs, gfp_t flags);
+int xhci_alloc_erst(struct xhci_hcd *xhci,
+		struct xhci_ring *evt_ring,
+		struct xhci_erst *erst,
+		gfp_t flags);
+void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
 void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
 		struct xhci_virt_device *virt_dev,
 		unsigned int ep_index);
@@ -1998,6 +2006,10 @@ struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
 void xhci_urb_free_priv(struct urb_priv *urb_priv);
 void xhci_free_command(struct xhci_hcd *xhci,
 		struct xhci_command *command);
+struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
+		int type, gfp_t flags);
+void xhci_free_container_ctx(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *ctx);
 
 /* xHCI host controller glue */
 typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
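With the container-context helpers now declared here, a file outside xhci-mem.c can allocate and release a context directly. A minimal sketch, assuming the existing XHCI_CTX_TYPE_INPUT constant from xhci.h; the wrapper function itself is hypothetical.

/* Hypothetical sketch -- not part of this patch. */
static int xhci_example_input_ctx(struct xhci_hcd *xhci)
{
	struct xhci_container_ctx *ctx;

	/* Allocate an input-type container context. */
	ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* ... fill in input control, slot and endpoint contexts ... */

	xhci_free_container_ctx(xhci, ctx);
	return 0;
}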
@@ -2071,6 +2083,8 @@ void xhci_handle_command_timeout(struct work_struct *work);
 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
 		unsigned int ep_index, unsigned int stream_id);
 void xhci_cleanup_command_queue(struct xhci_hcd *xhci);
+void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring);
+unsigned int count_trbs(u64 addr, u64 len);
 
 /* xHCI roothub code */
 void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
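count_trbs() and inc_deq() are the lowest-level ring helpers a new consumer would need: the former sizes a transfer in TRBs, the latter advances a ring's dequeue pointer. A minimal sketch of that usage; the wrapper function and its parameters are hypothetical.

/* Hypothetical sketch -- not part of this patch. */
static void xhci_example_ring_helpers(struct xhci_hcd *xhci,
				      struct xhci_ring *evt_ring,
				      dma_addr_t buf, u64 len)
{
	/* TRBs needed to map 'len' bytes at 'buf', honoring the per-TRB
	 * length and boundary rules that count_trbs() encodes.
	 */
	unsigned int num_trbs = count_trbs(buf, len);

	/* The consumer of an event ring advances its dequeue pointer after
	 * handling each event TRB.
	 */
	inc_deq(xhci, evt_ring);

	(void)num_trbs;		/* illustration only */
}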