Merge branch 'usb-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6
* 'usb-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6:
  usb: musb: core: set has_tt flag
  USB: xhci: mark local functions as static
  USB: xhci: fix couple sparse annotations
  USB: xhci: rework xhci_print_ir_set() to get ir set from xhci itself
  USB: Reset USB 3.0 devices on (re)discovery
  xhci: Fix an error in count_sg_trbs_needed()
  xhci: Fix errors in the running total calculations in the TRB math
  xhci: Clarify some expressions in the TRB math
  xhci: Avoid BUG() in interrupt context
commit c1bc3beb06
@@ -2681,17 +2681,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
 
         mutex_lock(&usb_address0_mutex);
 
-        if (!udev->config && oldspeed == USB_SPEED_SUPER) {
-                /* Don't reset USB 3.0 devices during an initial setup */
-                usb_set_device_state(udev, USB_STATE_DEFAULT);
-        } else {
-                /* Reset the device; full speed may morph to high speed */
-                /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
-                retval = hub_port_reset(hub, port1, udev, delay);
-                if (retval < 0)         /* error or disconnect */
-                        goto fail;
-                /* success, speed is known */
-        }
+        /* Reset the device; full speed may morph to high speed */
+        /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
+        retval = hub_port_reset(hub, port1, udev, delay);
+        if (retval < 0)         /* error or disconnect */
+                goto fail;
+        /* success, speed is known */
+
         retval = -ENODEV;
 
         if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
@@ -169,9 +169,10 @@ static void xhci_print_ports(struct xhci_hcd *xhci)
         }
 }
 
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
 {
-        void *addr;
+        struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num];
+        void __iomem *addr;
         u32 temp;
         u64 temp_64;
 
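The hunk above combines two of the merged cleanups, "USB: xhci: rework xhci_print_ir_set() to get ir set from xhci itself" and the sparse annotation fixes: the interrupter register set is now looked up from the xhci struct, and the pointers are marked __iomem so sparse can check MMIO usage. As a minimal sketch of what the __iomem annotation buys (illustration only, not xHCI code; read_and_ack and irq_pending are hypothetical names):

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical helper: an __iomem pointer may only be accessed through the
 * MMIO accessors; dereferencing it directly would draw a sparse warning. */
static u32 read_and_ack(void __iomem *irq_pending)
{
        u32 pending = readl(irq_pending);       /* read through the accessor */

        writel(pending, irq_pending);           /* write back through the accessor */
        return pending;
}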
@@ -449,7 +450,7 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
         }
 }
 
-void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
+static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
 {
         /* Fields are 32 bits wide, DMA addresses are in bytes */
         int field_size = 32 / 8;
@@ -488,7 +489,7 @@ void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
                 dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
 }
 
-void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
+static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
                 struct xhci_container_ctx *ctx,
                 unsigned int last_ep)
 {
@@ -307,7 +307,7 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
 
 /***************** Streams structures manipulation *************************/
 
-void xhci_free_stream_ctx(struct xhci_hcd *xhci,
+static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
                 unsigned int num_stream_ctxs,
                 struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
 {
@@ -335,7 +335,7 @@ void xhci_free_stream_ctx(struct xhci_hcd *xhci,
  * The stream context array must be a power of 2, and can be as small as
  * 64 bytes or as large as 1MB.
  */
-struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
+static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
                 unsigned int num_stream_ctxs, dma_addr_t *dma,
                 gfp_t mem_flags)
 {
@@ -1900,11 +1900,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
         val &= DBOFF_MASK;
         xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
                         " from cap regs base addr\n", val);
-        xhci->dba = (void *) xhci->cap_regs + val;
+        xhci->dba = (void __iomem *) xhci->cap_regs + val;
         xhci_dbg_regs(xhci);
         xhci_print_run_regs(xhci);
         /* Set ir_set to interrupt register set 0 */
-        xhci->ir_set = (void *) xhci->run_regs->ir_set;
+        xhci->ir_set = &xhci->run_regs->ir_set[0];
 
         /*
          * Event ring setup: Allocate a normal ring, but also setup
@@ -1961,7 +1961,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
         /* Set the event ring dequeue address */
         xhci_set_hc_event_deq(xhci);
         xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
-        xhci_print_ir_set(xhci, xhci->ir_set, 0);
+        xhci_print_ir_set(xhci, 0);
 
         /*
          * XXX: Might need to set the Interrupter Moderation Register to
@@ -474,8 +474,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
         state->new_deq_seg = find_trb_seg(cur_td->start_seg,
                         dev->eps[ep_index].stopped_trb,
                         &state->new_cycle_state);
-        if (!state->new_deq_seg)
-                BUG();
+        if (!state->new_deq_seg) {
+                WARN_ON(1);
+                return;
+        }
+
         /* Dig out the cycle state saved by the xHC during the stop ep cmd */
         xhci_dbg(xhci, "Finding endpoint context\n");
         ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
@@ -486,8 +489,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
         state->new_deq_seg = find_trb_seg(state->new_deq_seg,
                         state->new_deq_ptr,
                         &state->new_cycle_state);
-        if (!state->new_deq_seg)
-                BUG();
+        if (!state->new_deq_seg) {
+                WARN_ON(1);
+                return;
+        }
 
         trb = &state->new_deq_ptr->generic;
         if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
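The two hunks above are the "xhci: Avoid BUG() in interrupt context" change: when no new dequeue segment can be found, the driver now warns and returns instead of calling BUG(), which would halt the machine from interrupt context over a driver-internal inconsistency. A minimal sketch of the same warn-and-bail pattern in isolation (ring_state and update_dequeue are hypothetical names, not the driver's types):

#include <linux/bug.h>

struct ring_state {                     /* hypothetical, for illustration only */
        void *new_deq_seg;
};

static void update_dequeue(struct ring_state *state)
{
        if (!state->new_deq_seg) {
                /* Report the inconsistency but keep the system running,
                 * rather than calling BUG() in interrupt context. */
                WARN_ON(1);
                return;
        }
        /* ... continue with a valid dequeue segment ... */
}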
@@ -2363,12 +2368,13 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
 
                 /* Scatter gather list entries may cross 64KB boundaries */
                 running_total = TRB_MAX_BUFF_SIZE -
-                        (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+                        (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
+                running_total &= TRB_MAX_BUFF_SIZE - 1;
                 if (running_total != 0)
                         num_trbs++;
 
                 /* How many more 64KB chunks to transfer, how many more TRBs? */
-                while (running_total < sg_dma_len(sg)) {
+                while (running_total < sg_dma_len(sg) && running_total < temp) {
                         num_trbs++;
                         running_total += TRB_MAX_BUFF_SIZE;
                 }
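The running_total fix above matters when a scatter-gather entry starts exactly on a 64KB boundary: TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)) then evaluates to 65536 rather than 0, and the extra mask folds it back to 0 so no TRB is counted for data that is not there. A small stand-alone sketch of the arithmetic (user-space C, illustrative addresses; TRB_MAX_BUFF_SIZE is 64KB as in the driver):

#include <stdio.h>
#include <stdint.h>

#define TRB_MAX_BUFF_SIZE (1 << 16)     /* 64KB per TRB, as in the xHCI driver */

int main(void)
{
        uint64_t addrs[] = { 0x10000, 0x10200 };        /* 64KB-aligned vs. unaligned */

        for (int i = 0; i < 2; i++) {
                uint64_t addr = addrs[i];
                unsigned int running_total;

                running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
                printf("addr %#llx: before mask %u", (unsigned long long)addr, running_total);

                running_total &= TRB_MAX_BUFF_SIZE - 1; /* 65536 -> 0 for aligned buffers */
                printf(", after mask %u\n", running_total);
        }
        return 0;
}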
@@ -2394,11 +2400,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
 static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
 {
         if (num_trbs != 0)
-                dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+                dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
                                 "TRBs, %d left\n", __func__,
                                 urb->ep->desc.bEndpointAddress, num_trbs);
         if (running_total != urb->transfer_buffer_length)
-                dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+                dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
                                 "queued %#x (%d), asked for %#x (%d)\n",
                                 __func__,
                                 urb->ep->desc.bEndpointAddress,
@@ -2533,8 +2539,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         sg = urb->sg;
         addr = (u64) sg_dma_address(sg);
         this_sg_len = sg_dma_len(sg);
-        trb_buff_len = TRB_MAX_BUFF_SIZE -
-                (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+        trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
         trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
         if (trb_buff_len > urb->transfer_buffer_length)
                 trb_buff_len = urb->transfer_buffer_length;
@@ -2572,7 +2577,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                                 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
                                 (unsigned int) addr + trb_buff_len);
                 if (TRB_MAX_BUFF_SIZE -
-                                (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
+                                (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
                         xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
                         xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
                                         (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
@@ -2616,7 +2621,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                 }
 
                 trb_buff_len = TRB_MAX_BUFF_SIZE -
-                        (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+                        (addr & (TRB_MAX_BUFF_SIZE - 1));
                 trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
                 if (running_total + trb_buff_len > urb->transfer_buffer_length)
                         trb_buff_len =
@@ -2656,7 +2661,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         num_trbs = 0;
         /* How much data is (potentially) left before the 64KB boundary? */
         running_total = TRB_MAX_BUFF_SIZE -
-                (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+                (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+        running_total &= TRB_MAX_BUFF_SIZE - 1;
 
         /* If there's some data on this 64KB chunk, or we have to send a
          * zero-length transfer, we need at least one TRB
@@ -2700,8 +2706,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         /* How much data is in the first TRB? */
         addr = (u64) urb->transfer_dma;
         trb_buff_len = TRB_MAX_BUFF_SIZE -
-                (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
-        if (urb->transfer_buffer_length < trb_buff_len)
+                (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+        if (trb_buff_len > urb->transfer_buffer_length)
                 trb_buff_len = urb->transfer_buffer_length;
 
         first_trb = true;
@@ -2879,8 +2885,8 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
                 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
                 td_len = urb->iso_frame_desc[i].length;
 
-                running_total = TRB_MAX_BUFF_SIZE -
-                        (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+                running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
+                running_total &= TRB_MAX_BUFF_SIZE - 1;
                 if (running_total != 0)
                         num_trbs++;
 
@@ -109,7 +109,7 @@ int xhci_halt(struct xhci_hcd *xhci)
 /*
  * Set the run bit and wait for the host to be running.
  */
-int xhci_start(struct xhci_hcd *xhci)
+static int xhci_start(struct xhci_hcd *xhci)
 {
         u32 temp;
         int ret;
@@ -329,7 +329,7 @@ int xhci_init(struct usb_hcd *hcd)
 
 
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-void xhci_event_ring_work(unsigned long arg)
+static void xhci_event_ring_work(unsigned long arg)
 {
         unsigned long flags;
         int temp;
@@ -473,7 +473,7 @@ int xhci_run(struct usb_hcd *hcd)
                         xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
         xhci_writel(xhci, ER_IRQ_ENABLE(temp),
                         &xhci->ir_set->irq_pending);
-        xhci_print_ir_set(xhci, xhci->ir_set, 0);
+        xhci_print_ir_set(xhci, 0);
 
         if (NUM_TEST_NOOPS > 0)
                 doorbell = xhci_setup_one_noop(xhci);
@@ -528,7 +528,7 @@ void xhci_stop(struct usb_hcd *hcd)
         temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
         xhci_writel(xhci, ER_IRQ_DISABLE(temp),
                         &xhci->ir_set->irq_pending);
-        xhci_print_ir_set(xhci, xhci->ir_set, 0);
+        xhci_print_ir_set(xhci, 0);
 
         xhci_dbg(xhci, "cleaning up memory\n");
         xhci_mem_cleanup(xhci);
@@ -755,7 +755,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
         temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
         xhci_writel(xhci, ER_IRQ_DISABLE(temp),
                         &xhci->ir_set->irq_pending);
-        xhci_print_ir_set(xhci, xhci->ir_set, 0);
+        xhci_print_ir_set(xhci, 0);
 
         xhci_dbg(xhci, "cleaning up memory\n");
         xhci_mem_cleanup(xhci);
@@ -857,7 +857,7 @@ unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
 /* Returns 1 if the arguments are OK;
  * returns 0 this is a root hub; returns -EINVAL for NULL pointers.
  */
-int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
                 struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
                 const char *func) {
         struct xhci_hcd *xhci;
@@ -1693,7 +1693,7 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
         xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
 }
 
-void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
+static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
                 unsigned int slot_id, unsigned int ep_index,
                 struct xhci_dequeue_state *deq_state)
 {
@@ -1348,7 +1348,7 @@ static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
 }
 
 /* xHCI debugging */
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num);
 void xhci_print_registers(struct xhci_hcd *xhci);
 void xhci_dbg_regs(struct xhci_hcd *xhci);
 void xhci_print_run_regs(struct xhci_hcd *xhci);
@@ -1864,6 +1864,7 @@ allocate_instance(struct device *dev,
         INIT_LIST_HEAD(&musb->out_bulk);
 
         hcd->uses_new_polling = 1;
+        hcd->has_tt = 1;
 
         musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
         musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;