Merge branch 'for-greg' of git://gitorious.org/usb/usb into usb-next

* 'for-greg' of git://gitorious.org/usb/usb:
  usb: ehci-omap: Show fatal probing time errors to end user
  usb: musb: introduce api for dma code to check compatibility with usb request
  usb: musb: maintain three states for buffer mappings instead of two
  usb: musb: disable double buffering when it's broken
  usb: musb: hsdma: change back to use musb_read/writew
  usb: musb: core: fix IRQ check
  usb: musb: fix kernel panic during s2ram(v2)
Author: Greg Kroah-Hartman
Date:   2011-02-03 16:52:34 -08:00
Commit: 4e8ed7e499

 9 files changed, 96 insertions(+), 45 deletions(-)

diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c

@@ -796,7 +796,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
 	hcd = usb_create_hcd(&ehci_omap_hc_driver, &pdev->dev,
 			dev_name(&pdev->dev));
 	if (!hcd) {
-		dev_dbg(&pdev->dev, "failed to create hcd with err %d\n", ret);
+		dev_err(&pdev->dev, "failed to create hcd with err %d\n", ret);
 		ret = -ENOMEM;
 		goto err_create_hcd;
 	}
@@ -864,7 +864,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
 	ret = omap_start_ehc(omap, hcd);
 	if (ret) {
-		dev_dbg(&pdev->dev, "failed to start ehci\n");
+		dev_err(&pdev->dev, "failed to start ehci with err %d\n", ret);
 		goto err_start;
 	}
@@ -879,7 +879,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
 	ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
 	if (ret) {
-		dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
+		dev_err(&pdev->dev, "failed to add hcd with err %d\n", ret);
 		goto err_add_hcd;
 	}

diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c

@@ -404,6 +404,7 @@ static int bfin_musb_init(struct musb *musb)
 	musb->xceiv->set_power = bfin_musb_set_power;

 	musb->isr = blackfin_interrupt;
+	musb->double_buffer_not_ok = true;

 	return 0;
 }

diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c

@@ -128,12 +128,7 @@ MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
 static inline struct musb *dev_to_musb(struct device *dev)
 {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
-	/* usbcore insists dev->driver_data is a "struct hcd *" */
-	return hcd_to_musb(dev_get_drvdata(dev));
-#else
 	return dev_get_drvdata(dev);
-#endif
 }

 /*-------------------------------------------------------------------------*/
@@ -1876,10 +1871,9 @@ allocate_instance(struct device *dev,
 	musb = kzalloc(sizeof *musb, GFP_KERNEL);
 	if (!musb)
 		return NULL;
-	dev_set_drvdata(dev, musb);
-
 #endif
+	dev_set_drvdata(dev, musb);

 	musb->mregs = mbase;
 	musb->ctrl_base = mbase;
 	musb->nIrq = -ENODEV;
@@ -2191,7 +2185,7 @@ static int __init musb_probe(struct platform_device *pdev)
 	void __iomem	*base;

 	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!iomem || irq == 0)
+	if (!iomem || irq <= 0)
 		return -ENODEV;

 	base = ioremap(iomem->start, resource_size(iomem));

diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h

@@ -488,6 +488,18 @@ struct musb {
 	unsigned		set_address:1;
 	unsigned		test_mode:1;
 	unsigned		softconnect:1;
+	/*
+	 * FIXME: Remove this flag.
+	 *
+	 * This is only added to allow Blackfin to work
+	 * with current driver. For some unknown reason
+	 * Blackfin doesn't work with double buffering
+	 * and that's enabled by default.
+	 *
+	 * We added this flag to forcefully disable double
+	 * buffering until we get it working.
+	 */
+	unsigned		double_buffer_not_ok:1 __deprecated;

 	u8			address;
 	u8			test_mode_nr;

diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h

@@ -169,6 +169,9 @@ struct dma_controller {
 			dma_addr_t dma_addr,
 			u32 length);
 	int	(*channel_abort)(struct dma_channel *);
+	int	(*is_compatible)(struct dma_channel *channel,
+			u16 maxpacket,
+			void *buf, u32 length);
 };

 /* called after channel_program(), may indicate a fault */
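
The new hook is optional: a DMA driver that can service any request leaves is_compatible NULL, and map_dma_buffer() (in the musb_gadget.c hunks below) then maps unconditionally. As a purely illustrative sketch (the function name and the 32-bit alignment rule are invented for this example, not taken from this merge), a glue layer whose engine cannot handle unaligned buffers could reject them like this:

	/* Hypothetical example only, not part of this merge. */
	static int example_dma_is_compatible(struct dma_channel *channel,
				u16 maxpacket, void *buf, u32 length)
	{
		/* assumed hardware constraint: 32-bit-aligned buffers only */
		if ((unsigned long)buf & 0x3)
			return false;	/* reject: request stays unmapped, PIO used */
		return true;		/* accept: map_dma_buffer() maps as usual */
	}

A driver would assign such a function to the is_compatible member of its struct dma_controller, next to channel_abort.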

diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c

@@ -92,11 +92,33 @@

 /* ----------------------------------------------------------------------- */

+#define is_buffer_mapped(req) (is_dma_capable() && \
+		(req->map_state != UN_MAPPED))
+
 /* Maps the buffer to dma  */

 static inline void map_dma_buffer(struct musb_request *request,
-				struct musb *musb)
+			struct musb *musb, struct musb_ep *musb_ep)
 {
+	int compatible = true;
+	struct dma_controller *dma = musb->dma_controller;
+
+	request->map_state = UN_MAPPED;
+
+	if (!is_dma_capable() || !musb_ep->dma)
+		return;
+
+	/* Check if DMA engine can handle this request.
+	 * DMA code must reject the USB request explicitly.
+	 * Default behaviour is to map the request.
+	 */
+	if (dma->is_compatible)
+		compatible = dma->is_compatible(musb_ep->dma,
+				musb_ep->packet_sz, request->request.buf,
+				request->request.length);
+	if (!compatible)
+		return;
+
 	if (request->request.dma == DMA_ADDR_INVALID) {
 		request->request.dma = dma_map_single(
 				musb->controller,
@@ -105,7 +127,7 @@ static inline void map_dma_buffer(struct musb_request *request,
 				request->tx
 					? DMA_TO_DEVICE
 					: DMA_FROM_DEVICE);
-		request->mapped = 1;
+		request->map_state = MUSB_MAPPED;
 	} else {
 		dma_sync_single_for_device(musb->controller,
 				request->request.dma,
@@ -113,7 +135,7 @@ static inline void map_dma_buffer(struct musb_request *request,
 				request->tx
 					? DMA_TO_DEVICE
 					: DMA_FROM_DEVICE);
-		request->mapped = 0;
+		request->map_state = PRE_MAPPED;
 	}
 }
@@ -121,11 +143,14 @@ static inline void map_dma_buffer(struct musb_request *request,
 static inline void unmap_dma_buffer(struct musb_request *request,
 				struct musb *musb)
 {
+	if (!is_buffer_mapped(request))
+		return;
+
 	if (request->request.dma == DMA_ADDR_INVALID) {
 		DBG(20, "not unmapping a never mapped buffer\n");
 		return;
 	}
-	if (request->mapped) {
+	if (request->map_state == MUSB_MAPPED) {
 		dma_unmap_single(musb->controller,
 			request->request.dma,
 			request->request.length,
@@ -133,16 +158,15 @@ static inline void unmap_dma_buffer(struct musb_request *request,
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
 		request->request.dma = DMA_ADDR_INVALID;
-		request->mapped = 0;
-	} else {
+	} else { /* PRE_MAPPED */
 		dma_sync_single_for_cpu(musb->controller,
 			request->request.dma,
 			request->request.length,
 			request->tx
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
 	}
+	request->map_state = UN_MAPPED;
 }

 /*
@@ -172,8 +196,7 @@ __acquires(ep->musb->lock)
 	ep->busy = 1;
 	spin_unlock(&musb->lock);
-	if (is_dma_capable() && ep->dma)
-		unmap_dma_buffer(req, musb);
+	unmap_dma_buffer(req, musb);
 	if (request->status == 0)
 		DBG(5, "%s done request %p, %d/%d\n",
 				ep->end_point.name, request,
@@ -335,7 +358,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
 			csr);

 #ifndef CONFIG_MUSB_PIO_ONLY
-	if (is_dma_capable() && musb_ep->dma) {
+	if (is_buffer_mapped(req)) {
 		struct dma_controller	*c = musb->dma_controller;
 		size_t request_size;
@@ -436,8 +459,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
 		 * Unmap the dma buffer back to cpu if dma channel
 		 * programming fails
 		 */
-		if (is_dma_capable() && musb_ep->dma)
-			unmap_dma_buffer(req, musb);
+		unmap_dma_buffer(req, musb);

 		musb_write_fifo(musb_ep->hw_ep, fifo_count,
 				(u8 *) (request->buf + request->actual));
@@ -627,7 +649,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 		return;
 	}

-	if (is_cppi_enabled() && musb_ep->dma) {
+	if (is_cppi_enabled() && is_buffer_mapped(req)) {
 		struct dma_controller	*c = musb->dma_controller;
 		struct dma_channel	*channel = musb_ep->dma;
@@ -658,7 +680,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 		len = musb_readw(epio, MUSB_RXCOUNT);
 		if (request->actual < request->length) {
 #ifdef CONFIG_USB_INVENTRA_DMA
-			if (is_dma_capable() && musb_ep->dma) {
+			if (is_buffer_mapped(req)) {
 				struct dma_controller	*c;
 				struct dma_channel	*channel;
 				int			use_dma = 0;
@@ -742,7 +764,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 			fifo_count = min_t(unsigned, len, fifo_count);

 #ifdef CONFIG_USB_TUSB_OMAP_DMA
-			if (tusb_dma_omap() && musb_ep->dma) {
+			if (tusb_dma_omap() && is_buffer_mapped(req)) {
 				struct dma_controller *c = musb->dma_controller;
 				struct dma_channel *channel = musb_ep->dma;
 				u32 dma_addr = request->dma + request->actual;
@@ -762,7 +784,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 			 * programming fails. This buffer is mapped if the
 			 * channel allocation is successful
 			 */
-			if (is_dma_capable() && musb_ep->dma) {
+			if (is_buffer_mapped(req)) {
 				unmap_dma_buffer(req, musb);

 				/*
@@ -989,7 +1011,11 @@ static int musb_gadget_enable(struct usb_ep *ep,
 		/* Set TXMAXP with the FIFO size of the endpoint
 		 * to disable double buffering mode.
 		 */
-		musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
+		if (musb->double_buffer_not_ok)
+			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
+		else
+			musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
+					| (musb_ep->hb_mult << 11));

 		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
 		if (musb_readw(regs, MUSB_TXCSR)
@@ -1025,7 +1051,11 @@ static int musb_gadget_enable(struct usb_ep *ep,
 		/* Set RXMAXP with the FIFO size of the endpoint
 		 * to disable double buffering mode.
 		 */
-		musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
+		if (musb->double_buffer_not_ok)
+			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
+		else
+			musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
+					| (musb_ep->hb_mult << 11));

 		/* force shared fifo to OUT-only mode */
 		if (hw_ep->is_shared_fifo) {
@@ -1214,10 +1244,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
 	request->epnum = musb_ep->current_epnum;
 	request->tx = musb_ep->is_in;

-	if (is_dma_capable() && musb_ep->dma)
-		map_dma_buffer(request, musb);
-	else
-		request->mapped = 0;
+	map_dma_buffer(request, musb, musb_ep);

 	spin_lock_irqsave(&musb->lock, lockflags);

diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h

@@ -35,13 +35,19 @@
 #ifndef __MUSB_GADGET_H
 #define __MUSB_GADGET_H

+enum buffer_map_state {
+	UN_MAPPED = 0,
+	PRE_MAPPED,
+	MUSB_MAPPED
+};
+
 struct musb_request {
 	struct usb_request	request;
 	struct musb_ep		*ep;
 	struct musb		*musb;
 	u8			tx;		/* endpoint direction */
 	u8			epnum;
-	u8			mapped;
+	enum buffer_map_state	map_state;
 };

 static inline struct musb_request *to_musb_request(struct usb_request *req)
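
Read together with the musb_gadget.c hunks above, the new enum replaces the old boolean mapped with three states. A summary of the transitions, reconstructed from this diff rather than from any separate documentation:

	/*
	 * map_dma_buffer():
	 *   request.dma == DMA_ADDR_INVALID    -> dma_map_single(), MUSB_MAPPED
	 *   request.dma already valid          -> sync for device,  PRE_MAPPED
	 *   no DMA, or is_compatible() said no -> stays UN_MAPPED (PIO path)
	 * unmap_dma_buffer():
	 *   MUSB_MAPPED -> dma_unmap_single(),        then UN_MAPPED
	 *   PRE_MAPPED  -> dma_sync_single_for_cpu(), then UN_MAPPED
	 */

The third state is what the old two-state scheme could not express: a gadget-supplied mapping (PRE_MAPPED) must be synced but never unmapped, while an UN_MAPPED request must not be touched at all, which is what the new is_buffer_mapped() guard enforces.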

diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c

@@ -609,7 +609,7 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
 	/* Set RXMAXP with the FIFO size of the endpoint
 	 * to disable double buffer mode.
 	 */
-	if (musb->hwvers < MUSB_HWVERS_2000)
+	if (musb->double_buffer_not_ok)
 		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
 	else
 		musb_writew(ep->regs, MUSB_RXMAXP,
@@ -784,14 +784,13 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
 	/* protocol/endpoint/interval/NAKlimit */
 	if (epnum) {
 		musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
-		if (can_bulk_split(musb, qh->type))
+		if (musb->double_buffer_not_ok)
 			musb_writew(epio, MUSB_TXMAXP,
-					packet_sz
-					| ((hw_ep->max_packet_sz_tx /
-						packet_sz) - 1) << 11);
+					hw_ep->max_packet_sz_tx);
 		else
 			musb_writew(epio, MUSB_TXMAXP,
-					packet_sz);
+					qh->maxpacket |
+					((qh->hb_mult - 1) << 11));
 		musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
 	} else {
 		musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);

diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h

@@ -94,24 +94,33 @@ static inline void musb_write_hsdma_addr(void __iomem *mbase,
 {
 	musb_writew(mbase,
 		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_LOW),
-		((u16)((u32) dma_addr & 0xFFFF)));
+		dma_addr);
 	musb_writew(mbase,
 		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_HIGH),
-		((u16)(((u32) dma_addr >> 16) & 0xFFFF)));
+		(dma_addr >> 16));
 }

 static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel)
 {
-	return musb_readl(mbase,
+	u32 count = musb_readw(mbase,
 		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH));
+
+	count = count << 16;
+
+	count |= musb_readw(mbase,
+		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW));
+
+	return count;
 }

 static inline void musb_write_hsdma_count(void __iomem *mbase,
 					u8 bchannel, u32 len)
 {
-	musb_writel(mbase,
+	musb_writew(mbase,
+		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW),
+		len);
+	musb_writew(mbase,
 		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH),
-		len);
+		(len >> 16));
 }

 #endif /* CONFIG_BLACKFIN */
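
The shortlog entry only says "change back to use musb_read/writew"; presumably the Blackfin core (this block sits under CONFIG_BLACKFIN) cannot access these DMA registers as a single 32-bit word, so the count is split across, and reassembled from, the 16-bit COUNT register pair. A worked example with an assumed value:

	/* Illustration only, assuming len = 0x00012034:
	 *   musb_writew(..., MUSB_HSDMA_COUNT_LOW,  len);       writes 0x2034
	 *   musb_writew(..., MUSB_HSDMA_COUNT_HIGH, len >> 16); writes 0x0001
	 * musb_read_hsdma_count() then reassembles (0x0001 << 16) | 0x2034.
	 */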