Automatic merge of rsync://rsync.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6.git/
commit 66e60f9251
@@ -8,6 +8,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/delay.h>
 
 #include <asm/pbm.h>
 
@@ -379,6 +380,54 @@ bad:
 	return PCI_DMA_ERROR_CODE;
 }
 
+static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages)
+{
+	int limit;
+
+	PCI_STC_FLUSHFLAG_INIT(strbuf);
+	if (strbuf->strbuf_ctxflush &&
+	    iommu->iommu_ctxflush) {
+		unsigned long matchreg, flushreg;
+
+		flushreg = strbuf->strbuf_ctxflush;
+		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
+
+		limit = 10000;
+		do {
+			pci_iommu_write(flushreg, ctx);
+			udelay(10);
+			limit--;
+			if (!limit)
+				break;
+		} while(((long)pci_iommu_read(matchreg)) < 0L);
+		if (!limit)
+			printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
+			       "timeout vaddr[%08x] ctx[%lx]\n",
+			       vaddr, ctx);
+	} else {
+		unsigned long i;
+
+		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
+			pci_iommu_write(strbuf->strbuf_pflush, vaddr);
+	}
+
+	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
+	(void) pci_iommu_read(iommu->write_complete_reg);
+
+	limit = 10000;
+	while (!PCI_STC_FLUSHFLAG_SET(strbuf)) {
+		limit--;
+		if (!limit)
+			break;
+		udelay(10);
+		membar("#LoadLoad");
+	}
+	if (!limit)
+		printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
+		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
+		       vaddr, ctx, npages);
+}
+
 /* Unmap a single streaming mode DMA translation. */
 void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
@@ -386,7 +435,7 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	struct pci_iommu *iommu;
 	struct pci_strbuf *strbuf;
 	iopte_t *base;
-	unsigned long flags, npages, i, ctx;
+	unsigned long flags, npages, ctx;
 
 	if (direction == PCI_DMA_NONE)
 		BUG();
@@ -414,29 +463,8 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
-	if (strbuf->strbuf_enabled) {
-		u32 vaddr = bus_addr;
-
-		PCI_STC_FLUSHFLAG_INIT(strbuf);
-		if (strbuf->strbuf_ctxflush &&
-		    iommu->iommu_ctxflush) {
-			unsigned long matchreg, flushreg;
-
-			flushreg = strbuf->strbuf_ctxflush;
-			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-			do {
-				pci_iommu_write(flushreg, ctx);
-			} while(((long)pci_iommu_read(matchreg)) < 0L);
-		} else {
-			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
-				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
-		}
-
-		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-		(void) pci_iommu_read(iommu->write_complete_reg);
-		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-			membar("#LoadLoad");
-	}
+	if (strbuf->strbuf_enabled)
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -647,29 +675,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 	ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
-	if (strbuf->strbuf_enabled) {
-		u32 vaddr = (u32) bus_addr;
-
-		PCI_STC_FLUSHFLAG_INIT(strbuf);
-		if (strbuf->strbuf_ctxflush &&
-		    iommu->iommu_ctxflush) {
-			unsigned long matchreg, flushreg;
-
-			flushreg = strbuf->strbuf_ctxflush;
-			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-			do {
-				pci_iommu_write(flushreg, ctx);
-			} while(((long)pci_iommu_read(matchreg)) < 0L);
-		} else {
-			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
-				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
-		}
-
-		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-		(void) pci_iommu_read(iommu->write_complete_reg);
-		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-			membar("#LoadLoad");
-	}
+	if (strbuf->strbuf_enabled)
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -715,28 +722,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size
 	}
 
 	/* Step 2: Kick data out of streaming buffers. */
-	PCI_STC_FLUSHFLAG_INIT(strbuf);
-	if (iommu->iommu_ctxflush &&
-	    strbuf->strbuf_ctxflush) {
-		unsigned long matchreg, flushreg;
-
-		flushreg = strbuf->strbuf_ctxflush;
-		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-		do {
-			pci_iommu_write(flushreg, ctx);
-		} while(((long)pci_iommu_read(matchreg)) < 0L);
-	} else {
-		unsigned long i;
-
-		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
-			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
-	}
-
-	/* Step 3: Perform flush synchronization sequence. */
-	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-	(void) pci_iommu_read(iommu->write_complete_reg);
-	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-		membar("#LoadLoad");
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
@@ -749,7 +735,8 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
 	struct pcidev_cookie *pcp;
 	struct pci_iommu *iommu;
 	struct pci_strbuf *strbuf;
-	unsigned long flags, ctx;
+	unsigned long flags, ctx, npages, i;
+	u32 bus_addr;
 
 	pcp = pdev->sysdata;
 	iommu = pcp->pbm->iommu;
@@ -772,36 +759,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
 	}
 
 	/* Step 2: Kick data out of streaming buffers. */
-	PCI_STC_FLUSHFLAG_INIT(strbuf);
-	if (iommu->iommu_ctxflush &&
-	    strbuf->strbuf_ctxflush) {
-		unsigned long matchreg, flushreg;
-
-		flushreg = strbuf->strbuf_ctxflush;
-		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-		do {
-			pci_iommu_write(flushreg, ctx);
-		} while (((long)pci_iommu_read(matchreg)) < 0L);
-	} else {
-		unsigned long i, npages;
-		u32 bus_addr;
-
-		bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
-
-		for(i = 1; i < nelems; i++)
-			if (!sglist[i].dma_length)
-				break;
-		i--;
-		npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
-		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
-			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
-	}
-
-	/* Step 3: Perform flush synchronization sequence. */
-	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-	(void) pci_iommu_read(iommu->write_complete_reg);
-	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-		membar("#LoadLoad");
+	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
+	for(i = 1; i < nelems; i++)
+		if (!sglist[i].dma_length)
+			break;
+	i--;
+	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
+		  - bus_addr) >> IO_PAGE_SHIFT;
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
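
The pci_strbuf_flush() helper added above replaces the driver's previous unbounded busy-waits: it polls the hardware a bounded number of times, pauses with udelay(10) between attempts, and prints a warning instead of hanging forever if the streaming buffer never signals completion. Below is a minimal, compilable user-space sketch of that bounded-polling shape; flush_complete(), MAX_POLLS and POLL_DELAY_US are hypothetical stand-ins for illustration only, not part of the kernel code above.

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>             /* usleep() stands in for udelay() */

    #define MAX_POLLS       10000
    #define POLL_DELAY_US   10

    /* Hypothetical stand-in for a hardware completion check such as
     * PCI_STC_FLUSHFLAG_SET(strbuf). */
    static bool flush_complete(void)
    {
            return true;
    }

    /* Poll for completion, but give up after a bounded number of tries. */
    static int wait_for_flush(void)
    {
            int limit = MAX_POLLS;

            while (!flush_complete()) {
                    limit--;
                    if (!limit)
                            break;
                    usleep(POLL_DELAY_US);
            }
            if (!limit) {
                    fprintf(stderr, "flush: flushflag timeout\n");
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            return wait_for_flush() ? 1 : 0;
    }
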
@@ -117,19 +117,34 @@ static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages
 
 #define STRBUF_TAG_VALID	0x02UL
 
-static void strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
 {
+	unsigned long n;
+	int limit;
+
 	iommu->strbuf_flushflag = 0UL;
-	while (npages--)
-		upa_writeq(base + (npages << IO_PAGE_SHIFT),
+	n = npages;
+	while (n--)
+		upa_writeq(base + (n << IO_PAGE_SHIFT),
 			   iommu->strbuf_regs + STRBUF_PFLUSH);
 
 	/* Whoopee cushion! */
 	upa_writeq(__pa(&iommu->strbuf_flushflag),
 		   iommu->strbuf_regs + STRBUF_FSYNC);
 	upa_readq(iommu->sbus_control_reg);
-	while (iommu->strbuf_flushflag == 0UL)
+
+	limit = 10000;
+	while (iommu->strbuf_flushflag == 0UL) {
+		limit--;
+		if (!limit)
+			break;
+		udelay(10);
 		membar("#LoadLoad");
+	}
+	if (!limit)
+		printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
+		       "vaddr[%08x] npages[%ld]\n",
+		       base, npages);
 }
 
 static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
@@ -406,7 +421,7 @@ void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size,
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
-	strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -569,7 +584,7 @@ void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int
 	iommu = sdev->bus->iommu;
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
-	strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -581,7 +596,7 @@ void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t
 	size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK));
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -605,7 +620,7 @@ void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int
 	size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
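
One detail worth noting in the sbus_strbuf_flush() hunk above: the page-flush loop used to consume the page count directly (while (npages--)), which would leave nothing meaningful to report once the timeout warning was added at the end of the function. Copying the count into a local n keeps npages intact for the printk. A small compilable sketch of the same idea; flush_page() and flush_range() are hypothetical stand-ins, not the kernel functions.

    #include <stdio.h>

    /* Hypothetical stand-in for the per-page flush register write. */
    static void flush_page(unsigned long page_index)
    {
            (void)page_index;
    }

    static void flush_range(unsigned long npages)
    {
            unsigned long n = npages;       /* keep npages intact for reporting */

            while (n--)
                    flush_page(n);

            /* npages is still valid here, as in the sbus_strbuf_flush() warning. */
            printf("flushed %lu pages\n", npages);
    }

    int main(void)
    {
            flush_range(4);
            return 0;
    }
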
@@ -81,10 +81,6 @@ unsigned char irqs[4] = {
 int irqhit=0;
 #endif
 
-#ifndef MIN
-#define MIN(a,b) ((a) < (b) ? (a) : (b))
-#endif
-
 static struct tty_driver *aurora_driver;
 static struct Aurora_board aurora_board[AURORA_NBOARD] = {
 	{0,},
@@ -594,7 +590,7 @@ static void aurora_transmit(struct Aurora_board const * bp, int chip)
 				    &bp->r[chip]->r[CD180_TDR]);
 			port->COR2 &= ~COR2_ETC;
 		}
-		count = MIN(port->break_length, 0xff);
+		count = min(port->break_length, 0xff);
 		sbus_writeb(CD180_C_ESC,
 			    &bp->r[chip]->r[CD180_TDR]);
 		sbus_writeb(CD180_C_DELAY,
@@ -1575,7 +1571,7 @@ static int aurora_write(struct tty_struct * tty,
 	save_flags(flags);
 	while (1) {
 		cli();
-		c = MIN(count, MIN(SERIAL_XMIT_SIZE - port->xmit_cnt - 1,
+		c = min(count, min(SERIAL_XMIT_SIZE - port->xmit_cnt - 1,
 				   SERIAL_XMIT_SIZE - port->xmit_head));
 		if (c <= 0) {
 			restore_flags(flags);
@@ -61,6 +61,16 @@ struct uart_sunsab_port {
 	unsigned char	pvr_dtr_bit;	/* Which PVR bit is DTR */
 	unsigned char	pvr_dsr_bit;	/* Which PVR bit is DSR */
 	int		type;		/* SAB82532 version */
+
+	/* Setting configuration bits while the transmitter is active
+	 * can cause garbage characters to get emitted by the chip.
+	 * Therefore, we cache such writes here and do the real register
+	 * write the next time the transmitter becomes idle.
+	 */
+	unsigned int	cached_ebrg;
+	unsigned char	cached_mode;
+	unsigned char	cached_pvr;
+	unsigned char	cached_dafo;
 };
 
 /*
@@ -236,6 +246,7 @@ receive_chars(struct uart_sunsab_port *up,
 }
 
 static void sunsab_stop_tx(struct uart_port *, unsigned int);
+static void sunsab_tx_idle(struct uart_sunsab_port *);
 
 static void transmit_chars(struct uart_sunsab_port *up,
 			   union sab82532_irq_status *stat)
@@ -258,6 +269,7 @@ static void transmit_chars(struct uart_sunsab_port *up,
 		return;
 
 	set_bit(SAB82532_XPR, &up->irqflags);
+	sunsab_tx_idle(up);
 
 	if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
 		up->interrupt_mask1 |= SAB82532_IMR1_XPR;
@@ -397,21 +409,21 @@ static void sunsab_set_mctrl(struct uart_port *port, unsigned int mctrl)
 	struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
 
 	if (mctrl & TIOCM_RTS) {
-		writeb(readb(&up->regs->rw.mode) & ~SAB82532_MODE_FRTS,
-		       &up->regs->rw.mode);
-		writeb(readb(&up->regs->rw.mode) | SAB82532_MODE_RTS,
-		       &up->regs->rw.mode);
+		up->cached_mode &= ~SAB82532_MODE_FRTS;
+		up->cached_mode |= SAB82532_MODE_RTS;
 	} else {
-		writeb(readb(&up->regs->rw.mode) | SAB82532_MODE_FRTS,
-		       &up->regs->rw.mode);
-		writeb(readb(&up->regs->rw.mode) | SAB82532_MODE_RTS,
-		       &up->regs->rw.mode);
+		up->cached_mode |= (SAB82532_MODE_FRTS |
+				    SAB82532_MODE_RTS);
 	}
 	if (mctrl & TIOCM_DTR) {
-		writeb(readb(&up->regs->rw.pvr) & ~(up->pvr_dtr_bit), &up->regs->rw.pvr);
+		up->cached_pvr &= ~(up->pvr_dtr_bit);
 	} else {
-		writeb(readb(&up->regs->rw.pvr) | up->pvr_dtr_bit, &up->regs->rw.pvr);
+		up->cached_pvr |= up->pvr_dtr_bit;
 	}
+
+	set_bit(SAB82532_REGS_PENDING, &up->irqflags);
+	if (test_bit(SAB82532_XPR, &up->irqflags))
+		sunsab_tx_idle(up);
 }
 
 /* port->lock is not held. */
@@ -449,6 +461,25 @@ static void sunsab_stop_tx(struct uart_port *port, unsigned int tty_stop)
 	writeb(up->interrupt_mask1, &up->regs->w.imr1);
 }
 
+/* port->lock held by caller. */
+static void sunsab_tx_idle(struct uart_sunsab_port *up)
+{
+	if (test_bit(SAB82532_REGS_PENDING, &up->irqflags)) {
+		u8 tmp;
+
+		clear_bit(SAB82532_REGS_PENDING, &up->irqflags);
+		writeb(up->cached_mode, &up->regs->rw.mode);
+		writeb(up->cached_pvr, &up->regs->rw.pvr);
+		writeb(up->cached_dafo, &up->regs->w.dafo);
+
+		writeb(up->cached_ebrg & 0xff, &up->regs->w.bgr);
+		tmp = readb(&up->regs->rw.ccr2);
+		tmp &= ~0xc0;
+		tmp |= (up->cached_ebrg >> 2) & 0xc0;
+		writeb(tmp, &up->regs->rw.ccr2);
+	}
+}
+
 /* port->lock held by caller. */
 static void sunsab_start_tx(struct uart_port *port, unsigned int tty_start)
 {
@@ -517,12 +548,16 @@ static void sunsab_break_ctl(struct uart_port *port, int break_state)
 
 	spin_lock_irqsave(&up->port.lock, flags);
 
-	val = readb(&up->regs->rw.dafo);
+	val = up->cached_dafo;
 	if (break_state)
 		val |= SAB82532_DAFO_XBRK;
 	else
 		val &= ~SAB82532_DAFO_XBRK;
-	writeb(val, &up->regs->rw.dafo);
+	up->cached_dafo = val;
 
+	set_bit(SAB82532_REGS_PENDING, &up->irqflags);
+	if (test_bit(SAB82532_XPR, &up->irqflags))
+		sunsab_tx_idle(up);
+
 	spin_unlock_irqrestore(&up->port.lock, flags);
 }
@@ -566,8 +601,9 @@ static int sunsab_startup(struct uart_port *port)
 	       SAB82532_CCR2_TOE, &up->regs->w.ccr2);
 	writeb(0, &up->regs->w.ccr3);
 	writeb(SAB82532_CCR4_MCK4 | SAB82532_CCR4_EBRG, &up->regs->w.ccr4);
-	writeb(SAB82532_MODE_RTS | SAB82532_MODE_FCTS |
-	       SAB82532_MODE_RAC, &up->regs->w.mode);
+	up->cached_mode = (SAB82532_MODE_RTS | SAB82532_MODE_FCTS |
+			   SAB82532_MODE_RAC);
+	writeb(up->cached_mode, &up->regs->w.mode);
 	writeb(SAB82532_RFC_DPS|SAB82532_RFC_RFTH_32, &up->regs->w.rfc);
 
 	tmp = readb(&up->regs->rw.ccr0);
@@ -598,7 +634,6 @@ static void sunsab_shutdown(struct uart_port *port)
 {
 	struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
 	unsigned long flags;
-	unsigned char tmp;
 
 	spin_lock_irqsave(&up->port.lock, flags);
 
@@ -609,14 +644,13 @@ static void sunsab_shutdown(struct uart_port *port)
 	writeb(up->interrupt_mask1, &up->regs->w.imr1);
 
 	/* Disable break condition */
-	tmp = readb(&up->regs->rw.dafo);
-	tmp &= ~SAB82532_DAFO_XBRK;
-	writeb(tmp, &up->regs->rw.dafo);
+	up->cached_dafo = readb(&up->regs->rw.dafo);
+	up->cached_dafo &= ~SAB82532_DAFO_XBRK;
+	writeb(up->cached_dafo, &up->regs->rw.dafo);
 
 	/* Disable Receiver */
-	tmp = readb(&up->regs->rw.mode);
-	tmp &= ~SAB82532_MODE_RAC;
-	writeb(tmp, &up->regs->rw.mode);
+	up->cached_mode &= ~SAB82532_MODE_RAC;
+	writeb(up->cached_mode, &up->regs->rw.mode);
 
 	/*
 	 * XXX FIXME
@@ -685,7 +719,6 @@ static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cfla
 			       unsigned int iflag, unsigned int baud,
 			       unsigned int quot)
 {
-	unsigned int ebrg;
 	unsigned char dafo;
 	int bits, n, m;
 
@@ -714,10 +747,11 @@ static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cfla
 	} else {
 		dafo |= SAB82532_DAFO_PAR_EVEN;
 	}
+	up->cached_dafo = dafo;
 
 	calc_ebrg(baud, &n, &m);
 
-	ebrg = n | (m << 6);
+	up->cached_ebrg = n | (m << 6);
 
 	up->tec_timeout = (10 * 1000000) / baud;
 	up->cec_timeout = up->tec_timeout >> 2;
@@ -770,16 +804,13 @@ static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cfla
 	uart_update_timeout(&up->port, cflag,
 			    (up->port.uartclk / (16 * quot)));
 
-	/* Now bang the new settings into the chip. */
-	sunsab_cec_wait(up);
-	sunsab_tec_wait(up);
-	writeb(dafo, &up->regs->w.dafo);
-	writeb(ebrg & 0xff, &up->regs->w.bgr);
-	writeb((readb(&up->regs->rw.ccr2) & ~0xc0) | ((ebrg >> 2) & 0xc0),
-	       &up->regs->rw.ccr2);
-
-	writeb(readb(&up->regs->rw.mode) | SAB82532_MODE_RAC, &up->regs->rw.mode);
-
+	/* Now schedule a register update when the chip's
+	 * transmitter is idle.
+	 */
+	up->cached_mode |= SAB82532_MODE_RAC;
+	set_bit(SAB82532_REGS_PENDING, &up->irqflags);
+	if (test_bit(SAB82532_XPR, &up->irqflags))
+		sunsab_tx_idle(up);
 }
 
 /* port->lock is not held. */
@@ -1084,11 +1115,13 @@ static void __init sunsab_init_hw(void)
 			up->pvr_dsr_bit = (1 << 3);
 			up->pvr_dtr_bit = (1 << 2);
 		}
-		writeb((1 << 1) | (1 << 2) | (1 << 4), &up->regs->w.pvr);
-		writeb(readb(&up->regs->rw.mode) | SAB82532_MODE_FRTS,
-		       &up->regs->rw.mode);
-		writeb(readb(&up->regs->rw.mode) | SAB82532_MODE_RTS,
-		       &up->regs->rw.mode);
+		up->cached_pvr = (1 << 1) | (1 << 2) | (1 << 4);
+		writeb(up->cached_pvr, &up->regs->w.pvr);
+		up->cached_mode = readb(&up->regs->rw.mode);
+		up->cached_mode |= SAB82532_MODE_FRTS;
+		writeb(up->cached_mode, &up->regs->rw.mode);
+		up->cached_mode |= SAB82532_MODE_RTS;
+		writeb(up->cached_mode, &up->regs->rw.mode);
 
 		up->tec_timeout = SAB82532_MAX_TEC_TIMEOUT;
 		up->cec_timeout = SAB82532_MAX_CEC_TIMEOUT;
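
The sunsab driver hunks above all implement one idea: configuration registers (mode, PVR, DAFO, EBRG) are no longer written while the transmitter may be busy; new values are parked in the cached_* fields, SAB82532_REGS_PENDING is set, and sunsab_tx_idle() pushes everything to the chip once the transmitter reports idle (SAB82532_XPR). A stripped-down, compilable sketch of that cache-then-flush-on-idle pattern follows; struct fake_uart, hw_write() and the bit values are illustrative stand-ins, not the driver's real API.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for a register write to the chip. */
    static void hw_write(const char *reg, uint8_t val)
    {
            printf("write %s = 0x%02x\n", reg, val);
    }

    struct fake_uart {
            uint8_t cached_mode;
            uint8_t cached_dafo;
            bool    regs_pending;   /* plays the role of SAB82532_REGS_PENDING */
            bool    tx_idle;        /* plays the role of SAB82532_XPR */
    };

    /* Mirrors the shape of sunsab_tx_idle(): flush cached values once idle. */
    static void fake_tx_idle(struct fake_uart *up)
    {
            if (!up->regs_pending)
                    return;
            up->regs_pending = false;
            hw_write("mode", up->cached_mode);
            hw_write("dafo", up->cached_dafo);
    }

    /* Mirrors the shape of sunsab_break_ctl(): update the cache, defer the write. */
    static void fake_break_ctl(struct fake_uart *up, bool break_on)
    {
            if (break_on)
                    up->cached_dafo |= 0x40;        /* stand-in for SAB82532_DAFO_XBRK */
            else
                    up->cached_dafo &= ~0x40;

            up->regs_pending = true;
            if (up->tx_idle)
                    fake_tx_idle(up);
    }

    int main(void)
    {
            struct fake_uart u = { .tx_idle = true };

            fake_break_ctl(&u, true);
            return 0;
    }
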
@@ -126,6 +126,7 @@ union sab82532_irq_status {
 /* irqflags bits */
 #define SAB82532_ALLS		0x00000001
 #define SAB82532_XPR		0x00000002
+#define SAB82532_REGS_PENDING	0x00000004
 
 /* RFIFO Status Byte */
 #define SAB82532_RSTAT_PE	0x80