Merge with rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git

Thomas Gleixner 2005-05-23 15:11:45 +02:00 committed by Thomas Gleixner
commit f08276136b
91 changed files with 1664 additions and 2261 deletions

View File

@@ -252,8 +252,7 @@ in a tasks processor placement.
 There is an exception to the above. If hotplug funtionality is used
 to remove all the CPUs that are currently assigned to a cpuset,
 then the kernel will automatically update the cpus_allowed of all
-tasks attached to CPUs in that cpuset with the online CPUs of the
-nearest parent cpuset that still has some CPUs online. When memory
+tasks attached to CPUs in that cpuset to allow all CPUs. When memory
 hotplug functionality for removing Memory Nodes is available, a
 similar exception is expected to apply there as well. In general,
 the kernel prefers to violate cpuset placement, over starving a task

View File

@@ -25,6 +25,9 @@ APICs
   noapictimer    Don't set up the APIC timer
 
+  no_timer_check Don't check the IO-APIC timer. This can work around
+                 problems with incorrect timer initialization on some boards.
+
 Early Console
 
   syntax: earlyprintk=vga

View File

@@ -195,7 +195,7 @@ static void __init init_amd(struct cpuinfo_x86 *c)
 		c->x86_num_cores = 1;
 	}
 
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_X86_HT
 	/*
 	 * On a AMD dual core setup the lower bits of the APIC id
 	 * distingush the cores.  Assumes number of cores is a power
@@ -203,8 +203,11 @@ static void __init init_amd(struct cpuinfo_x86 *c)
 	 */
 	if (c->x86_num_cores > 1) {
 		int cpu = smp_processor_id();
-		/* Fix up the APIC ID following AMD specifications. */
-		cpu_core_id[cpu] >>= hweight32(c->x86_num_cores - 1);
+		unsigned bits = 0;
+		while ((1 << bits) < c->x86_num_cores)
+			bits++;
+		cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1);
+		phys_proc_id[cpu] >>= bits;
 		printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
 		       cpu, c->x86_num_cores, cpu_core_id[cpu]);
 	}
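
Editorial note on the hunk above: the new code replaces the hweight32()-based shift with an explicit log2 loop, then splits the initial APIC ID into core bits (low) and package bits (high). A minimal standalone sketch of that derivation, with hypothetical names (the kernel code operates on cpu_core_id[] and phys_proc_id[] instead):

    #include <stdio.h>

    /* Split an initial APIC ID into core and package numbers, assuming the
     * core count is a power of two, as the comment in init_amd() requires. */
    static void split_apic_id(unsigned int apic_id, unsigned int num_cores,
                              unsigned int *core, unsigned int *package)
    {
            unsigned int bits = 0;

            while ((1u << bits) < num_cores)        /* bits = log2(num_cores) */
                    bits++;
            *core = apic_id & ((1u << bits) - 1);   /* low bits select the core */
            *package = apic_id >> bits;             /* remaining bits: the package */
    }

    int main(void)
    {
            unsigned int core, package;

            split_apic_id(5, 2, &core, &package);   /* dual core, APIC ID 5 */
            printf("core %u, package %u\n", core, package); /* core 1, package 2 */
            return 0;
    }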

View File

@@ -244,11 +244,8 @@ static void __init early_cpu_detect(void)
 
 	early_intel_workaround(c);
 
-#ifdef CONFIG_SMP
 #ifdef CONFIG_X86_HT
-	phys_proc_id[smp_processor_id()] =
-#endif
-	cpu_core_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
+	phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
 #endif
 }

View File

@@ -888,6 +888,7 @@ void *xquad_portio;
 
 cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
 cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
+EXPORT_SYMBOL(cpu_core_map);
 
 static void __init smp_boot_cpus(unsigned int max_cpus)
 {

View File

@@ -238,19 +238,21 @@ void iounmap(volatile void __iomem *addr)
 	    addr < phys_to_virt(ISA_END_ADDRESS))
 		return;
 
-	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
+	write_lock(&vmlist_lock);
+	p = __remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
 	if (!p) {
-		printk("__iounmap: bad address %p\n", addr);
-		return;
+		printk("iounmap: bad address %p\n", addr);
+		goto out_unlock;
 	}
 
 	if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
+		/* p->size includes the guard page, but cpa doesn't like that */
 		change_page_attr(virt_to_page(__va(p->phys_addr)),
 				 p->size >> PAGE_SHIFT,
 				 PAGE_KERNEL);
 		global_flush_tlb();
 	}
+out_unlock:
+	write_unlock(&vmlist_lock);
 	kfree(p);
 }

View File

@@ -330,8 +330,9 @@ interrupt_base:
 	/* If we are faulting a kernel address, we have to use the
 	 * kernel page tables.
 	 */
-	andis.	r11, r10, 0x8000
-	beq	3f
+	lis	r11, TASK_SIZE@h
+	cmplw	r10, r11
+	blt+	3f
 	lis	r11, swapper_pg_dir@h
 	ori	r11, r11, swapper_pg_dir@l
@@ -464,8 +465,9 @@ interrupt_base:
 	/* If we are faulting a kernel address, we have to use the
 	 * kernel page tables.
 	 */
-	andis.	r11, r10, 0x8000
-	beq	3f
+	lis	r11, TASK_SIZE@h
+	cmplw	r10, r11
+	blt+	3f
 	lis	r11, swapper_pg_dir@h
 	ori	r11, r11, swapper_pg_dir@l
@@ -533,8 +535,9 @@ interrupt_base:
 	/* If we are faulting a kernel address, we have to use the
 	 * kernel page tables.
 	 */
-	andis.	r11, r10, 0x8000
-	beq	3f
+	lis	r11, TASK_SIZE@h
+	cmplw	r10, r11
+	blt+	3f
 	lis	r11, swapper_pg_dir@h
 	ori	r11, r11, swapper_pg_dir@l

View File

@@ -499,7 +499,7 @@ static int __init set_preferred_console(void)
 {
 	struct device_node *prom_stdout;
 	char *name;
-	int offset;
+	int offset = 0;
 
 	if (of_stdout_device == NULL)
 		return -ENODEV;

View File

@@ -446,6 +446,7 @@ _GLOBAL(__copy_tofrom_user)
 #ifdef CONFIG_8xx
 	/* Don't use prefetch on 8xx */
 	mtctr	r0
+	li	r0,0
 53:	COPY_16_BYTES_WITHEX(0)
 	bdnz	53b
 
@@ -564,7 +565,9 @@ _GLOBAL(__copy_tofrom_user)
 /* or write fault in cacheline loop */
 105:	li	r9,1
 92:	li	r3,LG_CACHELINE_BYTES
-	b	99f
+	mfctr	r8
+	add	r0,r0,r8
+	b	106f
 /* read fault in final word loop */
 108:	li	r9,0
 	b	93f
@@ -585,7 +588,7 @@ _GLOBAL(__copy_tofrom_user)
  * r5 + (ctr << r3), and r9 is 0 for read or 1 for write.
  */
 99:	mfctr	r0
-	slw	r3,r0,r3
+106:	slw	r3,r0,r3
 	add.	r3,r3,r5
 	beq	120f		/* shouldn't happen */
 	cmpwi	0,r9,0

View File

@@ -179,6 +179,7 @@ void free_initmem(void)
 	if (!have_of)
 		FREESEC(openfirmware);
 	printk("\n");
+	ppc_md.progress = NULL;
 #undef FREESEC
 }

View File

@@ -61,6 +61,7 @@ static struct plat_serial8250_port serial_platform_data[] = {
 		.iotype		= UPIO_MEM,
 		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
 	},
+	{ },
 };
 
 struct platform_device ppc_sys_platform_devices[] = {

View File

@@ -61,6 +61,7 @@ static struct plat_serial8250_port serial_platform_data[] = {
 		.iotype		= UPIO_MEM,
 		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ,
 	},
+	{ },
 };
 
 struct platform_device ppc_sys_platform_devices[] = {

View File

@@ -557,12 +557,10 @@ static void __init openpic_initipi(u_int ipi, u_int pri, u_int vec)
  */
 void openpic_cause_IPI(u_int ipi, cpumask_t cpumask)
 {
-	cpumask_t phys;
 	DECL_THIS_CPU;
 
 	CHECK_THIS_CPU;
 	check_arg_ipi(ipi);
-	phys = physmask(cpumask);
 	openpic_write(&OpenPIC->THIS_CPU.IPI_Dispatch(ipi),
 		      cpus_addr(physmask(cpumask))[0]);
 }

View File

@@ -1750,7 +1750,44 @@ static void __init flatten_device_tree(void)
 	prom_printf("Device tree struct  0x%x -> 0x%x\n",
 		    RELOC(dt_struct_start), RELOC(dt_struct_end));
 }
 
+static void __init fixup_device_tree(void)
+{
+	unsigned long offset = reloc_offset();
+	phandle u3, i2c, mpic;
+	u32 u3_rev;
+	u32 interrupts[2];
+	u32 parent;
+
+	/* Some G5s have a missing interrupt definition, fix it up here */
+	u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
+	if ((long)u3 <= 0)
+		return;
+	i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
+	if ((long)i2c <= 0)
+		return;
+	mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
+	if ((long)mpic <= 0)
+		return;
+
+	/* check if proper rev of u3 */
+	if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev)) <= 0)
+		return;
+	if (u3_rev != 0x35)
+		return;
+	/* does it need fixup ? */
+	if (prom_getproplen(i2c, "interrupts") > 0)
+		return;
+	/* interrupt on this revision of u3 is number 0 and level */
+	interrupts[0] = 0;
+	interrupts[1] = 1;
+	prom_setprop(i2c, "interrupts", &interrupts, sizeof(interrupts));
+	parent = (u32)mpic;
+	prom_setprop(i2c, "interrupt-parent", &parent, sizeof(parent));
+}
+
 static void __init prom_find_boot_cpu(void)
 {
@@ -1919,6 +1956,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, unsigned long
 			     PTRRELOC(&prom_tce_alloc_end), sizeof(RELOC(prom_tce_alloc_end)));
 	}
 
+	/*
+	 * Fixup any known bugs in the device-tree
+	 */
+	fixup_device_tree();
+
 	/*
 	 * Now finally create the flattened device-tree
 	 */

View File

@@ -8,6 +8,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/delay.h>
 
 #include <asm/pbm.h>
 
@@ -379,6 +380,56 @@ bad:
 	return PCI_DMA_ERROR_CODE;
 }
 
+static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages)
+{
+	int limit;
+
+	PCI_STC_FLUSHFLAG_INIT(strbuf);
+	if (strbuf->strbuf_ctxflush &&
+	    iommu->iommu_ctxflush) {
+		unsigned long matchreg, flushreg;
+
+		flushreg = strbuf->strbuf_ctxflush;
+		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
+
+		limit = 100000;
+		pci_iommu_write(flushreg, ctx);
+		for(;;) {
+			if (((long)pci_iommu_read(matchreg)) >= 0L)
+				break;
+			limit--;
+			if (!limit)
+				break;
+			udelay(1);
+		}
+		if (!limit)
+			printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
+			       "timeout vaddr[%08x] ctx[%lx]\n",
+			       vaddr, ctx);
+	} else {
+		unsigned long i;
+
+		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
+			pci_iommu_write(strbuf->strbuf_pflush, vaddr);
+	}
+
+	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
+	(void) pci_iommu_read(iommu->write_complete_reg);
+
+	limit = 100000;
+	while (!PCI_STC_FLUSHFLAG_SET(strbuf)) {
+		limit--;
+		if (!limit)
+			break;
+		udelay(1);
+		membar("#LoadLoad");
+	}
+	if (!limit)
+		printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
+		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
+		       vaddr, ctx, npages);
+}
+
 /* Unmap a single streaming mode DMA translation. */
 void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
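
Editorial note: pci_strbuf_flush() above replaces previously unbounded do/while polling with loops capped at 100000 iterations of udelay(1), warning instead of hanging on wedged hardware. A hedged standalone sketch of that bounded-poll pattern (hw_ready() and udelay() here are hypothetical stand-ins, not the kernel accessors):

    #include <stdio.h>

    static int hw_ready(void) { return 1; }            /* stand-in for a device read */
    static void udelay(unsigned long us) { (void)us; } /* stand-in busy-wait */

    /* Poll for at most 'limit' microseconds; return -1 on timeout so the
     * caller can log a warning rather than spin forever. */
    static int poll_with_timeout(int limit)
    {
            while (!hw_ready()) {
                    if (!--limit)
                            return -1;
                    udelay(1);
            }
            return 0;
    }

    int main(void)
    {
            if (poll_with_timeout(100000) < 0)
                    printf("flush timeout\n");
            return 0;
    }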
@@ -386,7 +437,7 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	struct pci_iommu *iommu;
 	struct pci_strbuf *strbuf;
 	iopte_t *base;
-	unsigned long flags, npages, i, ctx;
+	unsigned long flags, npages, ctx;
 
 	if (direction == PCI_DMA_NONE)
 		BUG();
@@ -414,29 +465,8 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
-	if (strbuf->strbuf_enabled) {
-		u32 vaddr = bus_addr;
-
-		PCI_STC_FLUSHFLAG_INIT(strbuf);
-		if (strbuf->strbuf_ctxflush &&
-		    iommu->iommu_ctxflush) {
-			unsigned long matchreg, flushreg;
-
-			flushreg = strbuf->strbuf_ctxflush;
-			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-			do {
-				pci_iommu_write(flushreg, ctx);
-			} while(((long)pci_iommu_read(matchreg)) < 0L);
-		} else {
-			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
-				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
-		}
-
-		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-		(void) pci_iommu_read(iommu->write_complete_reg);
-		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-			membar("#LoadLoad");
-	}
+	if (strbuf->strbuf_enabled)
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -647,29 +677,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
-	if (strbuf->strbuf_enabled) {
-		u32 vaddr = (u32) bus_addr;
-
-		PCI_STC_FLUSHFLAG_INIT(strbuf);
-		if (strbuf->strbuf_ctxflush &&
-		    iommu->iommu_ctxflush) {
-			unsigned long matchreg, flushreg;
-
-			flushreg = strbuf->strbuf_ctxflush;
-			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-			do {
-				pci_iommu_write(flushreg, ctx);
-			} while(((long)pci_iommu_read(matchreg)) < 0L);
-		} else {
-			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
-				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
-		}
-
-		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-		(void) pci_iommu_read(iommu->write_complete_reg);
-		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-			membar("#LoadLoad");
-	}
+	if (strbuf->strbuf_enabled)
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -715,28 +724,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size
 	}
 
 	/* Step 2: Kick data out of streaming buffers. */
-	PCI_STC_FLUSHFLAG_INIT(strbuf);
-	if (iommu->iommu_ctxflush &&
-	    strbuf->strbuf_ctxflush) {
-		unsigned long matchreg, flushreg;
-
-		flushreg = strbuf->strbuf_ctxflush;
-		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-		do {
-			pci_iommu_write(flushreg, ctx);
-		} while(((long)pci_iommu_read(matchreg)) < 0L);
-	} else {
-		unsigned long i;
-
-		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
-			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
-	}
-
-	/* Step 3: Perform flush synchronization sequence. */
-	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-	(void) pci_iommu_read(iommu->write_complete_reg);
-	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-		membar("#LoadLoad");
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
@@ -749,7 +737,8 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
 	struct pcidev_cookie *pcp;
 	struct pci_iommu *iommu;
 	struct pci_strbuf *strbuf;
-	unsigned long flags, ctx;
+	unsigned long flags, ctx, npages, i;
+	u32 bus_addr;
 
 	pcp = pdev->sysdata;
 	iommu = pcp->pbm->iommu;
@@ -772,36 +761,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
 	}
 
 	/* Step 2: Kick data out of streaming buffers. */
-	PCI_STC_FLUSHFLAG_INIT(strbuf);
-	if (iommu->iommu_ctxflush &&
-	    strbuf->strbuf_ctxflush) {
-		unsigned long matchreg, flushreg;
-
-		flushreg = strbuf->strbuf_ctxflush;
-		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-		do {
-			pci_iommu_write(flushreg, ctx);
-		} while (((long)pci_iommu_read(matchreg)) < 0L);
-	} else {
-		unsigned long i, npages;
-		u32 bus_addr;
-
-		bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
-
-		for(i = 1; i < nelems; i++)
-			if (!sglist[i].dma_length)
-				break;
-		i--;
-		npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
-		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
-			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
-	}
-
-	/* Step 3: Perform flush synchronization sequence. */
-	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-	(void) pci_iommu_read(iommu->write_complete_reg);
-	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-		membar("#LoadLoad");
+	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
+	for(i = 1; i < nelems; i++)
+		if (!sglist[i].dma_length)
+			break;
+	i--;
+	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
+		  - bus_addr) >> IO_PAGE_SHIFT;
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }

View File

@@ -117,19 +117,34 @@ static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages
 
 #define STRBUF_TAG_VALID	0x02UL
 
-static void strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
 {
+	unsigned long n;
+	int limit;
+
 	iommu->strbuf_flushflag = 0UL;
-	while (npages--)
-		upa_writeq(base + (npages << IO_PAGE_SHIFT),
+	n = npages;
+	while (n--)
+		upa_writeq(base + (n << IO_PAGE_SHIFT),
 			   iommu->strbuf_regs + STRBUF_PFLUSH);
 
 	/* Whoopee cushion! */
 	upa_writeq(__pa(&iommu->strbuf_flushflag),
 		   iommu->strbuf_regs + STRBUF_FSYNC);
 	upa_readq(iommu->sbus_control_reg);
-	while (iommu->strbuf_flushflag == 0UL)
+
+	limit = 100000;
+	while (iommu->strbuf_flushflag == 0UL) {
+		limit--;
+		if (!limit)
+			break;
+		udelay(1);
 		membar("#LoadLoad");
+	}
+	if (!limit)
+		printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
+		       "vaddr[%08x] npages[%ld]\n",
+		       base, npages);
 }
 
@@ -406,7 +421,7 @@ void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size,
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
-	strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -569,7 +584,7 @@ void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int
 	iommu = sdev->bus->iommu;
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
-	strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -581,7 +596,7 @@ void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t
 	size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK));
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -605,7 +620,7 @@ void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int
 	size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }

View File

@@ -6,6 +6,10 @@ config 64BIT
 	bool
 	default y
 
+config TOP_ADDR
+	hex
+	default 0x80000000
+
 config 3_LEVEL_PGTABLES
 	bool
 	default y

View File

@@ -20,9 +20,17 @@
 #include "os.h"
 
 #ifdef CONFIG_NOCONFIG_CHAN
+
+/* The printk's here are wrong because we are complaining that there is no
+ * output device, but printk is printing to that output device.  The user will
+ * never see the error.  printf would be better, except it can't run on a
+ * kernel stack because it will overflow it.
+ * Use printk for now since that will avoid crashing.
+ */
+
 static void *not_configged_init(char *str, int device, struct chan_opts *opts)
 {
-	printf(KERN_ERR "Using a channel type which is configured out of "
+	printk(KERN_ERR "Using a channel type which is configured out of "
 	       "UML\n");
 	return(NULL);
 }
@@ -30,27 +38,27 @@ static void *not_configged_init(char *str, int device, struct chan_opts *opts)
 static int not_configged_open(int input, int output, int primary, void *data,
 			      char **dev_out)
 {
-	printf(KERN_ERR "Using a channel type which is configured out of "
+	printk(KERN_ERR "Using a channel type which is configured out of "
 	       "UML\n");
 	return(-ENODEV);
 }
 
 static void not_configged_close(int fd, void *data)
 {
-	printf(KERN_ERR "Using a channel type which is configured out of "
+	printk(KERN_ERR "Using a channel type which is configured out of "
 	       "UML\n");
 }
 
 static int not_configged_read(int fd, char *c_out, void *data)
 {
-	printf(KERN_ERR "Using a channel type which is configured out of "
+	printk(KERN_ERR "Using a channel type which is configured out of "
 	       "UML\n");
 	return(-EIO);
 }
 
 static int not_configged_write(int fd, const char *buf, int len, void *data)
 {
-	printf(KERN_ERR "Using a channel type which is configured out of "
+	printk(KERN_ERR "Using a channel type which is configured out of "
 	       "UML\n");
 	return(-EIO);
 }
@@ -58,7 +66,7 @@ static int not_configged_write(int fd, const char *buf, int len, void *data)
 static int not_configged_console_write(int fd, const char *buf, int len,
 				       void *data)
 {
-	printf(KERN_ERR "Using a channel type which is configured out of "
+	printk(KERN_ERR "Using a channel type which is configured out of "
 	       "UML\n");
 	return(-EIO);
 }
@@ -66,7 +74,7 @@ static int not_configged_console_write(int fd, const char *buf, int len,
 static int not_configged_window_size(int fd, void *data, unsigned short *rows,
 				     unsigned short *cols)
 {
-	printf(KERN_ERR "Using a channel type which is configured out of "
+	printk(KERN_ERR "Using a channel type which is configured out of "
 	       "UML\n");
 	return(-ENODEV);
 }

View File

@@ -73,7 +73,6 @@ int mcast_setup(char *str, char **mac_out, void *data)
 	struct mcast_init *init = data;
 	char *port_str = NULL, *ttl_str = NULL, *remain;
 	char *last;
-	int n;
 
 	*init = ((struct mcast_init)
 		{ .addr 	= "239.192.168.1",
@@ -89,13 +88,12 @@ int mcast_setup(char *str, char **mac_out, void *data)
 	}
 
 	if(port_str != NULL){
-		n = simple_strtoul(port_str, &last, 10);
+		init->port = simple_strtoul(port_str, &last, 10);
 		if((*last != '\0') || (last == port_str)){
 			printk(KERN_ERR "mcast_setup - Bad port : '%s'\n",
 			       port_str);
 			return(0);
 		}
-		init->port = htons(n);
 	}
 
 	if(ttl_str != NULL){

View File

@@ -38,7 +38,7 @@ static struct sockaddr_in *new_addr(char *addr, unsigned short port)
 	}
 	sin->sin_family = AF_INET;
 	sin->sin_addr.s_addr = in_aton(addr);
-	sin->sin_port = port;
+	sin->sin_port = htons(port);
 	return(sin);
 }
 
@@ -55,28 +55,25 @@ static int mcast_open(void *data)
 	struct mcast_data *pri = data;
 	struct sockaddr_in *sin = pri->mcast_addr;
 	struct ip_mreq mreq;
-	int fd, yes = 1;
+	int fd = -EINVAL, yes = 1, err = -EINVAL;;
 
-	if ((sin->sin_addr.s_addr == 0) || (sin->sin_port == 0)) {
-		fd = -EINVAL;
+	if ((sin->sin_addr.s_addr == 0) || (sin->sin_port == 0))
 		goto out;
-	}
 
 	fd = socket(AF_INET, SOCK_DGRAM, 0);
 	if (fd < 0){
 		printk("mcast_open : data socket failed, errno = %d\n",
 		       errno);
-		fd = -ENOMEM;
+		fd = -errno;
 		goto out;
 	}
 
 	if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)) < 0) {
 		printk("mcast_open: SO_REUSEADDR failed, errno = %d\n",
 		       errno);
-		os_close_file(fd);
-		fd = -EINVAL;
-		goto out;
+		goto out_close;
 	}
 
 	/* set ttl according to config */
@@ -84,26 +81,20 @@ static int mcast_open(void *data)
 			 sizeof(pri->ttl)) < 0) {
 		printk("mcast_open: IP_MULTICAST_TTL failed, error = %d\n",
 		       errno);
-		os_close_file(fd);
-		fd = -EINVAL;
-		goto out;
+		goto out_close;
 	}
 
 	/* set LOOP, so data does get fed back to local sockets */
 	if (setsockopt(fd, SOL_IP, IP_MULTICAST_LOOP, &yes, sizeof(yes)) < 0) {
 		printk("mcast_open: IP_MULTICAST_LOOP failed, error = %d\n",
 		       errno);
-		os_close_file(fd);
-		fd = -EINVAL;
-		goto out;
+		goto out_close;
 	}
 
 	/* bind socket to mcast address */
 	if (bind(fd, (struct sockaddr *) sin, sizeof(*sin)) < 0) {
 		printk("mcast_open : data bind failed, errno = %d\n", errno);
-		os_close_file(fd);
-		fd = -EINVAL;
-		goto out;
+		goto out_close;
 	}
 
 	/* subscribe to the multicast group */
@@ -117,12 +108,15 @@ static int mcast_open(void *data)
 		       "interface on the host.\n");
 		printk("eth0 should be configured in order to use the "
 		       "multicast transport.\n");
-		os_close_file(fd);
-		fd = -EINVAL;
+		goto out_close;
 	}
 
  out:
-	return(fd);
+	return fd;
+
+ out_close:
+	os_close_file(fd);
+	return err;
 }
 
 static void mcast_close(int fd, void *data)
@@ -164,14 +158,3 @@ struct net_user_info mcast_user_info = {
 	.delete_address = NULL,
 	.max_packet = MAX_PACKET - ETH_HEADER_OTHER
 };
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */

View File

@@ -55,7 +55,7 @@
 #include "mem_kern.h"
 #include "cow.h"
 
-enum ubd_req { UBD_READ, UBD_WRITE, UBD_MMAP };
+enum ubd_req { UBD_READ, UBD_WRITE };
 
 struct io_thread_req {
 	enum ubd_req op;
@@ -68,8 +68,6 @@ struct io_thread_req {
 	unsigned long sector_mask;
 	unsigned long long cow_offset;
 	unsigned long bitmap_words[2];
-	int map_fd;
-	unsigned long long map_offset;
 	int error;
 };
 
@@ -122,10 +120,6 @@ static int ubd_ioctl(struct inode * inode, struct file * file,
 
 #define MAX_DEV (8)
 
-/* Changed in early boot */
-static int ubd_do_mmap = 0;
-#define UBD_MMAP_BLOCK_SIZE PAGE_SIZE
-
 static struct block_device_operations ubd_blops = {
         .owner		= THIS_MODULE,
         .open		= ubd_open,
@@ -175,12 +169,6 @@ struct ubd {
 	int no_cow;
 	struct cow cow;
 	struct platform_device pdev;
-
-	int map_writes;
-	int map_reads;
-	int nomap_writes;
-	int nomap_reads;
-	int write_maps;
 };
 
 #define DEFAULT_COW { \
@@ -200,11 +188,6 @@ struct ubd {
 	.openflags =	OPEN_FLAGS, \
 	.no_cow =	0, \
 	.cow =		DEFAULT_COW, \
-	.map_writes	= 0, \
-	.map_reads	= 0, \
-	.nomap_writes	= 0, \
-	.nomap_reads	= 0, \
-	.write_maps	= 0, \
 }
 
 struct ubd ubd_dev[MAX_DEV] = { [ 0 ... MAX_DEV - 1 ] = DEFAULT_UBD };
 
@@ -314,13 +297,6 @@ static int ubd_setup_common(char *str, int *index_out)
 		int major;
 
 		str++;
-		if(!strcmp(str, "mmap")){
-			CHOOSE_MODE(printk("mmap not supported by the ubd "
-					   "driver in tt mode\n"),
-				    ubd_do_mmap = 1);
-			return(0);
-		}
-
 		if(!strcmp(str, "sync")){
 			global_openflags = of_sync(global_openflags);
 			return(0);
@@ -524,7 +500,7 @@ static void ubd_handler(void)
 {
 	struct io_thread_req req;
 	struct request *rq = elv_next_request(ubd_queue);
-	int n, err;
+	int n;
 
 	do_ubd = NULL;
 	intr_count++;
@@ -538,19 +514,6 @@ static void ubd_handler(void)
 		return;
 	}
 
-	if((req.op != UBD_MMAP) &&
-	   ((req.offset != ((__u64) (rq->sector)) << 9) ||
-	    (req.length != (rq->current_nr_sectors) << 9)))
-		panic("I/O op mismatch");
-
-	if(req.map_fd != -1){
-		err = physmem_subst_mapping(req.buffer, req.map_fd,
-					    req.map_offset, 1);
-		if(err)
-			printk("ubd_handler - physmem_subst_mapping failed, "
-			       "err = %d\n", -err);
-	}
-
 	ubd_finish(rq, req.error);
 	reactivate_fd(thread_fd, UBD_IRQ);
 	do_ubd_request(ubd_queue);
@@ -583,14 +546,10 @@ static int ubd_file_size(struct ubd *dev, __u64 *size_out)
 
 static void ubd_close(struct ubd *dev)
 {
-	if(ubd_do_mmap)
-		physmem_forget_descriptor(dev->fd);
 	os_close_file(dev->fd);
 	if(dev->cow.file == NULL)
 		return;
 
-	if(ubd_do_mmap)
-		physmem_forget_descriptor(dev->cow.fd);
 	os_close_file(dev->cow.fd);
 	vfree(dev->cow.bitmap);
 	dev->cow.bitmap = NULL;
@@ -1010,94 +969,13 @@ static void cowify_req(struct io_thread_req *req, unsigned long *bitmap,
 			  req->bitmap_words, bitmap_len);
 }
 
-static int mmap_fd(struct request *req, struct ubd *dev, __u64 offset)
-{
-	__u64 sector;
-	unsigned char *bitmap;
-	int bit, i;
-
-	/* mmap must have been requested on the command line */
-	if(!ubd_do_mmap)
-		return(-1);
-
-	/* The buffer must be page aligned */
-	if(((unsigned long) req->buffer % UBD_MMAP_BLOCK_SIZE) != 0)
-		return(-1);
-
-	/* The request must be a page long */
-	if((req->current_nr_sectors << 9) != PAGE_SIZE)
-		return(-1);
-
-	if(dev->cow.file == NULL)
-		return(dev->fd);
-
-	sector = offset >> 9;
-	bitmap = (unsigned char *) dev->cow.bitmap;
-	bit = ubd_test_bit(sector, bitmap);
-
-	for(i = 1; i < req->current_nr_sectors; i++){
-		if(ubd_test_bit(sector + i, bitmap) != bit)
-			return(-1);
-	}
-
-	if(bit || (rq_data_dir(req) == WRITE))
-		offset += dev->cow.data_offset;
-
-	/* The data on disk must be page aligned */
-	if((offset % UBD_MMAP_BLOCK_SIZE) != 0)
-		return(-1);
-
-	return(bit ? dev->fd : dev->cow.fd);
-}
-
-static int prepare_mmap_request(struct ubd *dev, int fd, __u64 offset,
-				struct request *req,
-				struct io_thread_req *io_req)
-{
-	int err;
-
-	if(rq_data_dir(req) == WRITE){
-		/* Writes are almost no-ops since the new data is already in the
-		 * host page cache
-		 */
-		dev->map_writes++;
-		if(dev->cow.file != NULL)
-			cowify_bitmap(io_req->offset, io_req->length,
-				      &io_req->sector_mask, &io_req->cow_offset,
-				      dev->cow.bitmap, dev->cow.bitmap_offset,
-				      io_req->bitmap_words,
-				      dev->cow.bitmap_len);
-	}
-	else {
-		int w;
-
-		if((dev->cow.file != NULL) && (fd == dev->cow.fd))
-			w = 0;
-		else w = dev->openflags.w;
-
-		if((dev->cow.file != NULL) && (fd == dev->fd))
-			offset += dev->cow.data_offset;
-
-		err = physmem_subst_mapping(req->buffer, fd, offset, w);
-		if(err){
-			printk("physmem_subst_mapping failed, err = %d\n",
-			       -err);
-			return(1);
-		}
-
-		dev->map_reads++;
-	}
-	io_req->op = UBD_MMAP;
-	io_req->buffer = req->buffer;
-	return(0);
-}
-
 /* Called with ubd_io_lock held */
 static int prepare_request(struct request *req, struct io_thread_req *io_req)
 {
 	struct gendisk *disk = req->rq_disk;
 	struct ubd *dev = disk->private_data;
 	__u64 offset;
-	int len, fd;
+	int len;
 
 	if(req->rq_status == RQ_INACTIVE) return(1);
 
@@ -1114,34 +992,12 @@ static int prepare_request(struct request *req, struct io_thread_req *io_req)
 
 	io_req->fds[0] = (dev->cow.file != NULL) ? dev->cow.fd : dev->fd;
 	io_req->fds[1] = dev->fd;
-	io_req->map_fd = -1;
 	io_req->cow_offset = -1;
 	io_req->offset = offset;
 	io_req->length = len;
 	io_req->error = 0;
 	io_req->sector_mask = 0;
 
-	fd = mmap_fd(req, dev, io_req->offset);
-	if(fd > 0){
-		/* If mmapping is otherwise OK, but the first access to the
-		 * page is a write, then it's not mapped in yet.  So we have
-		 * to write the data to disk first, then we can map the disk
-		 * page in and continue normally from there.
-		 */
-		if((rq_data_dir(req) == WRITE) && !is_remapped(req->buffer)){
-			io_req->map_fd = dev->fd;
-			io_req->map_offset = io_req->offset +
-				dev->cow.data_offset;
-			dev->write_maps++;
-		}
-		else return(prepare_mmap_request(dev, fd, io_req->offset, req,
-						 io_req));
-	}
-
-	if(rq_data_dir(req) == READ)
-		dev->nomap_reads++;
-	else dev->nomap_writes++;
-
 	io_req->op = (rq_data_dir(req) == READ) ? UBD_READ : UBD_WRITE;
 	io_req->offsets[0] = 0;
 	io_req->offsets[1] = dev->cow.data_offset;
 
@@ -1229,143 +1085,6 @@ static int ubd_ioctl(struct inode * inode, struct file * file,
 	return(-EINVAL);
 }
 
-static int ubd_check_remapped(int fd, unsigned long address, int is_write,
-			      __u64 offset)
-{
-	__u64 bitmap_offset;
-	unsigned long new_bitmap[2];
-	int i, err, n;
-
-	/* If it's not a write access, we can't do anything about it */
-	if(!is_write)
-		return(0);
-
-	/* We have a write */
-	for(i = 0; i < sizeof(ubd_dev) / sizeof(ubd_dev[0]); i++){
-		struct ubd *dev = &ubd_dev[i];
-
-		if((dev->fd != fd) && (dev->cow.fd != fd))
-			continue;
-
-		/* It's a write to a ubd device */
-
-		/* This should be impossible now */
-		if(!dev->openflags.w){
-			/* It's a write access on a read-only device - probably
-			 * shouldn't happen.  If the kernel is trying to change
-			 * something with no intention of writing it back out,
-			 * then this message will clue us in that this needs
-			 * fixing
-			 */
-			printk("Write access to mapped page from readonly ubd "
-			       "device %d\n", i);
-			return(0);
-		}
-
-		/* It's a write to a writeable ubd device - it must be COWed
-		 * because, otherwise, the page would have been mapped in
-		 * writeable
-		 */
-
-		if(!dev->cow.file)
-			panic("Write fault on writeable non-COW ubd device %d",
-			      i);
-
-		/* It should also be an access to the backing file since the
-		 * COW pages should be mapped in read-write
-		 */
-
-		if(fd == dev->fd)
-			panic("Write fault on a backing page of ubd "
-			      "device %d\n", i);
-
-		/* So, we do the write, copying the backing data to the COW
-		 * file...
-		 */
-
-		err = os_seek_file(dev->fd, offset + dev->cow.data_offset);
-		if(err < 0)
-			panic("Couldn't seek to %lld in COW file of ubd "
-			      "device %d, err = %d",
-			      offset + dev->cow.data_offset, i, -err);
-
-		n = os_write_file(dev->fd, (void *) address, PAGE_SIZE);
-		if(n != PAGE_SIZE)
-			panic("Couldn't copy data to COW file of ubd "
-			      "device %d, err = %d", i, -n);
-
-		/* ... updating the COW bitmap... */
-
-		cowify_bitmap(offset, PAGE_SIZE, NULL, &bitmap_offset,
-			      dev->cow.bitmap, dev->cow.bitmap_offset,
-			      new_bitmap, dev->cow.bitmap_len);
-
-		err = os_seek_file(dev->fd, bitmap_offset);
-		if(err < 0)
-			panic("Couldn't seek to %lld in COW file of ubd "
-			      "device %d, err = %d", bitmap_offset, i, -err);
-
-		n = os_write_file(dev->fd, new_bitmap, sizeof(new_bitmap));
-		if(n != sizeof(new_bitmap))
-			panic("Couldn't update bitmap of ubd device %d, "
-			      "err = %d", i, -n);
-
-		/* Maybe we can map the COW page in, and maybe we can't.  If
-		 * it is a pre-V3 COW file, we can't, since the alignment will
-		 * be wrong.  If it is a V3 or later COW file which has been
-		 * moved to a system with a larger page size, then maybe we
-		 * can't, depending on the exact location of the page.
-		 */
-
-		offset += dev->cow.data_offset;
-
-		/* Remove the remapping, putting the original anonymous page
-		 * back.  If the COW file can be mapped in, that is done.
-		 * Otherwise, the COW page is read in.
-		 */
-
-		if(!physmem_remove_mapping((void *) address))
-			panic("Address 0x%lx not remapped by ubd device %d",
-			      address, i);
-		if((offset % UBD_MMAP_BLOCK_SIZE) == 0)
-			physmem_subst_mapping((void *) address, dev->fd,
-					      offset, 1);
-		else {
-			err = os_seek_file(dev->fd, offset);
-			if(err < 0)
-				panic("Couldn't seek to %lld in COW file of "
-				      "ubd device %d, err = %d", offset, i,
-				      -err);
-
-			n = os_read_file(dev->fd, (void *) address, PAGE_SIZE);
-			if(n != PAGE_SIZE)
-				panic("Failed to read page from offset %llx of "
-				      "COW file of ubd device %d, err = %d",
-				      offset, i, -n);
-		}
-
-		return(1);
-	}
-
-	/* It's not a write on a ubd device */
-	return(0);
-}
-
-static struct remapper ubd_remapper = {
-	.list	= LIST_HEAD_INIT(ubd_remapper.list),
-	.proc	= ubd_check_remapped,
-};
-
-static int ubd_remapper_setup(void)
-{
-	if(ubd_do_mmap)
-		register_remapper(&ubd_remapper);
-	return(0);
-}
-
-__initcall(ubd_remapper_setup);
-
 static int same_backing_files(char *from_cmdline, char *from_cow, char *cow)
 {
 	struct uml_stat buf1, buf2;
 
@@ -1568,15 +1287,6 @@ void do_io(struct io_thread_req *req)
 	int err;
 	__u64 off;
 
-	if(req->op == UBD_MMAP){
-		/* Touch the page to force the host to do any necessary IO to
-		 * get it into memory
-		 */
-		n = *((volatile int *) req->buffer);
-		req->error = update_bitmap(req);
-		return;
-	}
-
 	nsectors = req->length / req->sectorsize;
 	start = 0;
 	do {

View File

@@ -31,7 +31,6 @@ extern int sysemu_supported;
 #ifdef UML_CONFIG_MODE_SKAS
 
 #include "skas_ptregs.h"
-#include "sysdep/faultinfo.h"
 
 #define REGS_IP(r) ((r)[HOST_IP])
 #define REGS_SP(r) ((r)[HOST_SP])
@@ -59,6 +58,7 @@ extern int sysemu_supported;
 #define PTRACE_SYSEMU_SINGLESTEP 32
 #endif
 
+#include "sysdep/faultinfo.h"
 #include "choose-mode.h"
 
 union uml_pt_regs {

View File

@@ -9,8 +9,6 @@
 #include "linux/in6.h"
 #include "asm/uaccess.h"
 
-extern unsigned int csum_partial_copy_from(const unsigned char *src, unsigned char *dst, int len,
-					   int sum, int *err_ptr);
 extern unsigned csum_partial(const unsigned char *buff, unsigned len,
 			     unsigned sum);
 
@@ -31,10 +29,15 @@ unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *
 }
 
 static __inline__
-unsigned int csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst,
-					 int len, int sum, int *err_ptr)
+unsigned int csum_partial_copy_from_user(const unsigned char *src,
+					 unsigned char *dst, int len, int sum,
+					 int *err_ptr)
 {
-	return csum_partial_copy_from(src, dst, len, sum, err_ptr);
+	if(copy_from_user(dst, src, len)){
+		*err_ptr = -EFAULT;
+		return(-1);
+	}
+	return csum_partial(dst, len, sum);
 }
 
 /**
@@ -137,15 +140,6 @@ static inline unsigned add32_with_carry(unsigned a, unsigned b)
 	return a;
 }
 
-#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
+extern unsigned short ip_compute_csum(unsigned char * buff, int len);
+
+#endif

View File

@@ -135,6 +135,7 @@ extern int mode_tt;
 	__CHOOSE_MODE(SC_EFLAGS(UPT_SC(r)), REGS_EFLAGS((r)->skas.regs))
 #define UPT_SC(r) ((r)->tt.sc)
 #define UPT_SYSCALL_NR(r) __CHOOSE_MODE((r)->tt.syscall, (r)->skas.syscall)
+#define UPT_SYSCALL_RET(r) UPT_RAX(r)
 
 extern int user_context(unsigned long sp);
 
@@ -196,32 +197,32 @@ struct syscall_args {
 
 #define UPT_SET(regs, reg, val) \
-	({ unsigned long val; \
+	({ unsigned long __upt_val = val; \
 	switch(reg){ \
-	case R8: UPT_R8(regs) = val; break; \
-	case R9: UPT_R9(regs) = val; break; \
-	case R10: UPT_R10(regs) = val; break; \
-	case R11: UPT_R11(regs) = val; break; \
-	case R12: UPT_R12(regs) = val; break; \
-	case R13: UPT_R13(regs) = val; break; \
-	case R14: UPT_R14(regs) = val; break; \
-	case R15: UPT_R15(regs) = val; break; \
-	case RIP: UPT_IP(regs) = val; break; \
-	case RSP: UPT_SP(regs) = val; break; \
-	case RAX: UPT_RAX(regs) = val; break; \
-	case RBX: UPT_RBX(regs) = val; break; \
-	case RCX: UPT_RCX(regs) = val; break; \
-	case RDX: UPT_RDX(regs) = val; break; \
-	case RSI: UPT_RSI(regs) = val; break; \
-	case RDI: UPT_RDI(regs) = val; break; \
-	case RBP: UPT_RBP(regs) = val; break; \
-	case ORIG_RAX: UPT_ORIG_RAX(regs) = val; break; \
-	case CS: UPT_CS(regs) = val; break; \
-	case DS: UPT_DS(regs) = val; break; \
-	case ES: UPT_ES(regs) = val; break; \
-	case FS: UPT_FS(regs) = val; break; \
-	case GS: UPT_GS(regs) = val; break; \
-	case EFLAGS: UPT_EFLAGS(regs) = val; break; \
+	case R8: UPT_R8(regs) = __upt_val; break; \
+	case R9: UPT_R9(regs) = __upt_val; break; \
+	case R10: UPT_R10(regs) = __upt_val; break; \
+	case R11: UPT_R11(regs) = __upt_val; break; \
+	case R12: UPT_R12(regs) = __upt_val; break; \
+	case R13: UPT_R13(regs) = __upt_val; break; \
+	case R14: UPT_R14(regs) = __upt_val; break; \
+	case R15: UPT_R15(regs) = __upt_val; break; \
+	case RIP: UPT_IP(regs) = __upt_val; break; \
+	case RSP: UPT_SP(regs) = __upt_val; break; \
+	case RAX: UPT_RAX(regs) = __upt_val; break; \
+	case RBX: UPT_RBX(regs) = __upt_val; break; \
+	case RCX: UPT_RCX(regs) = __upt_val; break; \
+	case RDX: UPT_RDX(regs) = __upt_val; break; \
+	case RSI: UPT_RSI(regs) = __upt_val; break; \
+	case RDI: UPT_RDI(regs) = __upt_val; break; \
+	case RBP: UPT_RBP(regs) = __upt_val; break; \
+	case ORIG_RAX: UPT_ORIG_RAX(regs) = __upt_val; break; \
+	case CS: UPT_CS(regs) = __upt_val; break; \
+	case DS: UPT_DS(regs) = __upt_val; break; \
+	case ES: UPT_ES(regs) = __upt_val; break; \
+	case FS: UPT_FS(regs) = __upt_val; break; \
+	case GS: UPT_GS(regs) = __upt_val; break; \
+	case EFLAGS: UPT_EFLAGS(regs) = __upt_val; break; \
 	default : \
 		panic("Bad register in UPT_SET : %d\n", reg); \
 		break; \
@@ -245,14 +246,3 @@ struct syscall_args {
 	CHOOSE_MODE((&(r)->tt.faultinfo), (&(r)->skas.faultinfo))
 
 #endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
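
Editorial note on the UPT_SET hunk above: the temporary was renamed to __upt_val because a block-local variable named val shadows the macro parameter of the same name, so each case assigned an uninitialized local to the register slot. A hedged standalone illustration (SET_BAD/SET_OK are hypothetical macros, using GCC statement expressions like the kernel macro):

    #include <stdio.h>

    /* Buggy: the local 'val' shadows the macro argument, so 'dst' receives
     * an uninitialized value (and a constant argument won't even compile). */
    #define SET_BAD(dst, val) \
            ({ unsigned long val; (dst) = val; })

    /* Fixed, as in UPT_SET: a uniquely named temporary captures the argument. */
    #define SET_OK(dst, val) \
            ({ unsigned long __v = (val); (dst) = __v; })

    int main(void)
    {
            unsigned long a = 1, b = 1, v = 42;

            SET_BAD(a, v);  /* expands to { unsigned long v; a = v; }: a is garbage */
            SET_OK(b, v);   /* b == 42 */
            printf("a=%lu b=%lu\n", a, b);
            return 0;
    }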

View File

@@ -14,7 +14,7 @@ obj-y = config.o exec_kern.o exitcode.o \
 	tlb.o trap_kern.o trap_user.o uaccess_user.o um_arch.o umid.o \
 	user_util.o
 
-obj-$(CONFIG_BLK_DEV_INITRD) += initrd_kern.o initrd_user.o
+obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
 obj-$(CONFIG_GPROF)	+= gprof_syms.o
 obj-$(CONFIG_GCOV)	+= gmon_syms.o
 obj-$(CONFIG_TTY_LOG)	+= tty_log.o

arch/um/kernel/initrd.c Normal file
View File

@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#include "linux/init.h"
+#include "linux/bootmem.h"
+#include "linux/initrd.h"
+#include "asm/types.h"
+#include "user_util.h"
+#include "kern_util.h"
+#include "initrd.h"
+#include "init.h"
+#include "os.h"
+
+/* Changed by uml_initrd_setup, which is a setup */
+static char *initrd __initdata = NULL;
+
+static int __init read_initrd(void)
+{
+	void *area;
+	long long size;
+	int err;
+
+	if(initrd == NULL) return 0;
+	err = os_file_size(initrd, &size);
+	if(err) return 0;
+	area = alloc_bootmem(size);
+	if(area == NULL) return 0;
+	if(load_initrd(initrd, area, size) == -1) return 0;
+	initrd_start = (unsigned long) area;
+	initrd_end = initrd_start + size;
+	return 0;
+}
+
+__uml_postsetup(read_initrd);
+
+static int __init uml_initrd_setup(char *line, int *add)
+{
+	initrd = line;
+	return 0;
+}
+
+__uml_setup("initrd=", uml_initrd_setup,
+"initrd=<initrd image>\n"
+"    This is used to boot UML from an initrd image.  The argument is the\n"
+"    name of the file containing the image.\n\n"
+);
+
+int load_initrd(char *filename, void *buf, int size)
+{
+	int fd, n;
+
+	fd = os_open_file(filename, of_read(OPENFLAGS()), 0);
+	if(fd < 0){
+		printk("Opening '%s' failed - err = %d\n", filename, -fd);
+		return(-1);
+	}
+	n = os_read_file(fd, buf, size);
+	if(n != size){
+		printk("Read of %d bytes from '%s' failed, err = %d\n", size,
+		       filename, -n);
+		return(-1);
+	}
+
+	os_close_file(fd);
+	return(0);
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only.  This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-file-style: "linux"
+ * End:
+ */

View File

@@ -57,6 +57,7 @@ EXPORT_SYMBOL(copy_to_user_tt);
 EXPORT_SYMBOL(strncpy_from_user_skas);
 EXPORT_SYMBOL(copy_to_user_skas);
 EXPORT_SYMBOL(copy_from_user_skas);
+EXPORT_SYMBOL(clear_user_skas);
 #endif
 
 EXPORT_SYMBOL(uml_strdup);

View File

@@ -100,12 +100,37 @@ void mem_init(void)
 #endif
 }
 
+/*
+ * Create a page table and place a pointer to it in a middle page
+ * directory entry.
+ */
+static void __init one_page_table_init(pmd_t *pmd)
+{
+	if (pmd_none(*pmd)) {
+		pte_t *pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+		set_pmd(pmd, __pmd(_KERNPG_TABLE +
+					   (unsigned long) __pa(pte)));
+		if (pte != pte_offset_kernel(pmd, 0))
+			BUG();
+	}
+}
+
+static void __init one_md_table_init(pud_t *pud)
+{
+#ifdef CONFIG_3_LEVEL_PGTABLES
+	pmd_t *pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
+	if (pmd_table != pmd_offset(pud, 0))
+		BUG();
+#endif
+}
+
 static void __init fixrange_init(unsigned long start, unsigned long end,
 				 pgd_t *pgd_base)
 {
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte;
 	int i, j;
 	unsigned long vaddr;
 
@@ -115,15 +140,12 @@ static void __init fixrange_init(unsigned long start, unsigned long end,
 	pgd = pgd_base + i;
 
 	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
-		pmd = (pmd_t *)pgd;
+		pud = pud_offset(pgd, vaddr);
+		if (pud_none(*pud))
+			one_md_table_init(pud);
+		pmd = pmd_offset(pud, vaddr);
 		for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
-			if (pmd_none(*pmd)) {
-				pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-				set_pmd(pmd, __pmd(_KERNPG_TABLE +
-					   (unsigned long) __pa(pte)));
-				if (pte != pte_offset_kernel(pmd, 0))
-					BUG();
-			}
+			one_page_table_init(pmd);
 			vaddr += PMD_SIZE;
 		}
 		j = 0;

View File

@@ -28,9 +28,9 @@ static inline void set_singlestepping(struct task_struct *child, int on)
 	child->thread.singlestep_syscall = 0;
 
 #ifdef SUBARCH_SET_SINGLESTEPPING
-	SUBARCH_SET_SINGLESTEPPING(child, on)
+	SUBARCH_SET_SINGLESTEPPING(child, on);
 #endif
 }
 
 /*
  * Called by kernel/ptrace.c when detaching..
@@ -83,7 +83,7 @@ long sys_ptrace(long request, long pid, long addr, long data)
 	}
 
 #ifdef SUBACH_PTRACE_SPECIAL
-	SUBARCH_PTRACE_SPECIAL(child,request,addr,data)
+	SUBARCH_PTRACE_SPECIAL(child,request,addr,data);
 #endif
 
 	ret = ptrace_check_attach(child, request == PTRACE_KILL);

View File

@@ -57,10 +57,11 @@ int handle_page_fault(unsigned long address, unsigned long ip,
 	*code_out = SEGV_ACCERR;
 	if(is_write && !(vma->vm_flags & VM_WRITE))
 		goto out;
+
+	if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+		goto out;
+
 	page = address & PAGE_MASK;
-	pgd = pgd_offset(mm, page);
-	pud = pud_offset(pgd, page);
-	pmd = pmd_offset(pud, page);
 	do {
 survive:
 		switch (handle_mm_fault(mm, vma, address, is_write)){
@@ -106,33 +107,6 @@ out_of_memory:
 	goto out;
 }
 
-LIST_HEAD(physmem_remappers);
-
-void register_remapper(struct remapper *info)
-{
-	list_add(&info->list, &physmem_remappers);
-}
-
-static int check_remapped_addr(unsigned long address, int is_write)
-{
-	struct remapper *remapper;
-	struct list_head *ele;
-	__u64 offset;
-	int fd;
-
-	fd = phys_mapping(__pa(address), &offset);
-	if(fd == -1)
-		return(0);
-
-	list_for_each(ele, &physmem_remappers){
-		remapper = list_entry(ele, struct remapper, list);
-		if((*remapper->proc)(fd, address, is_write, offset))
-			return(1);
-	}
-
-	return(0);
-}
-
 /*
  * We give a *copy* of the faultinfo in the regs to segv.
  * This must be done, since nesting SEGVs could overwrite
@@ -151,8 +125,6 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, void *sc)
 		flush_tlb_kernel_vm();
 		return(0);
 	}
-	else if(check_remapped_addr(address & PAGE_MASK, is_write))
-		return(0);
 	else if(current->mm == NULL)
 		panic("Segfault with no mm");
 	err = handle_page_fault(address, ip, is_write, is_user, &si.si_code);

View File

@@ -12,6 +12,7 @@ EXPORT_SYMBOL(__do_copy_to_user);
 EXPORT_SYMBOL(__do_strncpy_from_user);
 EXPORT_SYMBOL(__do_strnlen_user);
 EXPORT_SYMBOL(__do_clear_user);
+EXPORT_SYMBOL(clear_user_tt);
 
 EXPORT_SYMBOL(tracing_pid);
 EXPORT_SYMBOL(honeypot);

View File

@@ -73,6 +73,8 @@ SECTIONS
 
   .got            : { *(.got.plt) *(.got) }
   .dynamic        : { *(.dynamic) }
+  .tdata	  : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+  .tbss		  : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
   /* We want the small data sections together, so single-instruction offsets
      can access them all, and initialized data all before uninitialized, so
      we can shorten the on-disk segment size.  */

View File

@@ -9,11 +9,11 @@ USER_OBJS := bugs.o ptrace_user.o sigcontext.o fault.o
 
 SYMLINKS = bitops.c semaphore.c highmem.c module.c
 
-include arch/um/scripts/Makefile.rules
-
 bitops.c-dir = lib
 semaphore.c-dir = kernel
 highmem.c-dir = mm
 module.c-dir = kernel
 
 subdir- := util
+
+include arch/um/scripts/Makefile.rules

View File

@@ -1,5 +1,7 @@
-#include "linux/delay.h"
-#include "asm/param.h"
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <asm/param.h>
 
 void __delay(unsigned long time)
 {
@@ -20,13 +22,19 @@ void __udelay(unsigned long usecs)
 	int i, n;
 
 	n = (loops_per_jiffy * HZ * usecs) / MILLION;
-	for(i=0;i<n;i++) ;
+	for(i=0;i<n;i++)
+		cpu_relax();
 }
 
+EXPORT_SYMBOL(__udelay);
+
 void __const_udelay(unsigned long usecs)
 {
 	int i, n;
 
 	n = (loops_per_jiffy * HZ * usecs) / MILLION;
-	for(i=0;i<n;i++) ;
+	for(i=0;i<n;i++)
+		cpu_relax();
 }
+
+EXPORT_SYMBOL(__const_udelay);

View File

@@ -14,11 +14,11 @@ obj-$(CONFIG_MODULES) += module.o um_module.o
 
 USER_OBJS := ptrace_user.o sigcontext.o
 
-include arch/um/scripts/Makefile.rules
-
 SYMLINKS = bitops.c csum-copy.S csum-partial.c csum-wrappers.c memcpy.S \
 	semaphore.c thunk.S module.c
 
+include arch/um/scripts/Makefile.rules
+
 bitops.c-dir = lib
 csum-copy.S-dir = lib
 csum-partial.c-dir = lib
@@ -28,6 +28,4 @@ semaphore.c-dir = kernel
 thunk.S-dir = lib
 module.c-dir = kernel
 
-CFLAGS_csum-partial.o := -Dcsum_partial=arch_csum_partial
-
 subdir- := util

View File

@@ -5,40 +5,37 @@
  * Licensed under the GPL
  */
 
-#include "linux/delay.h"
-#include "asm/processor.h"
-#include "asm/param.h"
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <asm/processor.h>
+#include <asm/param.h>
 
 void __delay(unsigned long loops)
 {
 	unsigned long i;
 
-	for(i = 0; i < loops; i++) ;
+	for(i = 0; i < loops; i++)
+		cpu_relax();
 }
 
 void __udelay(unsigned long usecs)
 {
-	int i, n;
+	unsigned long i, n;
 
 	n = (loops_per_jiffy * HZ * usecs) / MILLION;
-	for(i=0;i<n;i++) ;
+	for(i=0;i<n;i++)
+		cpu_relax();
 }
 
+EXPORT_SYMBOL(__udelay);
+
 void __const_udelay(unsigned long usecs)
 {
-	int i, n;
+	unsigned long i, n;
 
 	n = (loops_per_jiffy * HZ * usecs) / MILLION;
-	for(i=0;i<n;i++) ;
+	for(i=0;i<n;i++)
+		cpu_relax();
 }
 
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
+EXPORT_SYMBOL(__const_udelay);


@ -16,5 +16,4 @@ EXPORT_SYMBOL(__up_wakeup);
EXPORT_SYMBOL(__memcpy); EXPORT_SYMBOL(__memcpy);
/* Networking helper routines. */ /* Networking helper routines. */
/*EXPORT_SYMBOL(csum_partial_copy_from); EXPORT_SYMBOL(ip_compute_csum);
EXPORT_SYMBOL(csum_partial_copy_to);*/


@ -5,10 +5,11 @@
*/ */
#define __FRAME_OFFSETS #define __FRAME_OFFSETS
#include "asm/ptrace.h" #include <asm/ptrace.h>
#include "linux/sched.h" #include <linux/sched.h>
#include "linux/errno.h" #include <linux/errno.h>
#include "asm/elf.h" #include <asm/uaccess.h>
#include <asm/elf.h>
/* XXX x86_64 */ /* XXX x86_64 */
unsigned long not_ss; unsigned long not_ss;


@ -15,6 +15,7 @@
#include "asm/unistd.h" #include "asm/unistd.h"
#include "asm/prctl.h" /* XXX This should get the constants from libc */ #include "asm/prctl.h" /* XXX This should get the constants from libc */
#include "choose-mode.h" #include "choose-mode.h"
#include "kern.h"
asmlinkage long sys_uname64(struct new_utsname __user * name) asmlinkage long sys_uname64(struct new_utsname __user * name)
{ {


@ -3,6 +3,14 @@
#include <signal.h> #include <signal.h>
#define __FRAME_OFFSETS #define __FRAME_OFFSETS
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/types.h>
/* For some reason, x86_64 defines u64 and u32 only in <pci/types.h>, which I
* refuse to include here, even though they're used throughout the headers.
* These are used in asm/user.h, and that include can't be avoided because of
* the sizeof(struct user_regs_struct) below.
*/
typedef __u64 u64;
typedef __u32 u32;
#include <asm/user.h> #include <asm/user.h>
#define DEFINE(sym, val) \ #define DEFINE(sym, val) \


@ -42,6 +42,8 @@
int sis_apic_bug; /* not actually supported, dummy for compile */ int sis_apic_bug; /* not actually supported, dummy for compile */
static int no_timer_check;
static DEFINE_SPINLOCK(ioapic_lock); static DEFINE_SPINLOCK(ioapic_lock);
/* /*
@ -1601,7 +1603,7 @@ static inline void check_timer(void)
* Ok, does IRQ0 through the IOAPIC work? * Ok, does IRQ0 through the IOAPIC work?
*/ */
unmask_IO_APIC_irq(0); unmask_IO_APIC_irq(0);
if (timer_irq_works()) { if (!no_timer_check && timer_irq_works()) {
nmi_watchdog_default(); nmi_watchdog_default();
if (nmi_watchdog == NMI_IO_APIC) { if (nmi_watchdog == NMI_IO_APIC) {
disable_8259A_irq(0); disable_8259A_irq(0);
@ -1671,6 +1673,13 @@ static inline void check_timer(void)
panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n"); panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
} }
static int __init notimercheck(char *s)
{
no_timer_check = 1;
return 1;
}
__setup("no_timer_check", notimercheck);
/* /*
* *
* IRQ's that are handled by the PIC in the MPS IOAPIC case. * IRQ's that are handled by the PIC in the MPS IOAPIC case.


@ -380,7 +380,7 @@ asmlinkage long sys_ptrace(long request, long pid, unsigned long addr, long data
break; break;
switch (addr) { switch (addr) {
case 0 ... sizeof(struct user_regs_struct): case 0 ... sizeof(struct user_regs_struct) - sizeof(long):
tmp = getreg(child, addr); tmp = getreg(child, addr);
break; break;
case offsetof(struct user, u_debugreg[0]): case offsetof(struct user, u_debugreg[0]):
@ -425,7 +425,7 @@ asmlinkage long sys_ptrace(long request, long pid, unsigned long addr, long data
break; break;
switch (addr) { switch (addr) {
case 0 ... sizeof(struct user_regs_struct): case 0 ... sizeof(struct user_regs_struct) - sizeof(long):
ret = putreg(child, addr, data); ret = putreg(child, addr, data);
break; break;
/* Disallows to set a breakpoint into the vsyscall */ /* Disallows to set a breakpoint into the vsyscall */
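
The two "case 0 ..." changes above fix an off-by-one: GCC case ranges are inclusive, so the old upper bound admitted an offset one word past the last register. A compilable sketch with the x86-64 numbers (216 bytes for struct user_regs_struct is an assumption about the layout, not something the hunk states):

        #include <stdio.h>

        #define REGS_SIZE 216   /* assumed sizeof(struct user_regs_struct) */
        #define WORD        8   /* sizeof(long) on x86-64 */

        int main(void)
        {
                unsigned long addr = REGS_SIZE; /* one word past the last register */

                printf("old bound accepts it: %d\n", addr <= REGS_SIZE);        /* 1 */
                printf("new bound accepts it: %d\n", addr <= REGS_SIZE - WORD); /* 0 */
                return 0;
        }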


@ -719,7 +719,6 @@ static void __init display_cacheinfo(struct cpuinfo_x86 *c)
} }
} }
#ifdef CONFIG_SMP
/* /*
* On a AMD dual core setup the lower bits of the APIC id distingush the cores. * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
* Assumes number of cores is a power of two. * Assumes number of cores is a power of two.
@ -729,16 +728,24 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
int cpu = smp_processor_id(); int cpu = smp_processor_id();
int node = 0; int node = 0;
unsigned bits;
if (c->x86_num_cores == 1) if (c->x86_num_cores == 1)
return; return;
/* Fix up the APIC ID following the AMD specification. */
cpu_core_id[cpu] >>= hweight32(c->x86_num_cores - 1); bits = 0;
while ((1 << bits) < c->x86_num_cores)
bits++;
/* Low order bits define the core id (index of core in socket) */
cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1);
/* Convert the APIC ID into the socket ID */
phys_proc_id[cpu] >>= bits;
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
/* When an ACPI SRAT table is available use the mappings from SRAT /* When an ACPI SRAT table is available use the mappings from SRAT
instead. */ instead. */
if (acpi_numa <= 0) { if (acpi_numa <= 0) {
node = cpu_core_id[cpu]; node = phys_proc_id[cpu];
if (!node_online(node)) if (!node_online(node))
node = first_node(node_online_map); node = first_node(node_online_map);
cpu_to_node[cpu] = node; cpu_to_node[cpu] = node;
@ -746,18 +753,11 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
node = cpu_to_node[cpu]; node = cpu_to_node[cpu];
} }
#endif #endif
/* For now: - better than BAD_APIC_ID at least*/
phys_proc_id[cpu] = cpu_core_id[cpu];
printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n", printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n",
cpu, c->x86_num_cores, node, cpu_core_id[cpu]); cpu, c->x86_num_cores, node, cpu_core_id[cpu]);
#endif #endif
} }
#else
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
}
#endif
static int __init init_amd(struct cpuinfo_x86 *c) static int __init init_amd(struct cpuinfo_x86 *c)
{ {
@ -963,8 +963,7 @@ void __init early_identify_cpu(struct cpuinfo_x86 *c)
} }
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
phys_proc_id[smp_processor_id()] = phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
cpu_core_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
#endif #endif
} }
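
The fixup above splits the initial APIC id into a core index (the low bits) and a socket id (the rest), with the bit count rounded up to the next power of two of x86_num_cores. A stand-alone illustration of the arithmetic:

        #include <stdio.h>

        int main(void)
        {
                unsigned apicid = 5, num_cores = 2, bits = 0;

                while ((1u << bits) < num_cores)        /* bits = ceil(log2(cores)) */
                        bits++;
                /* APIC id 101b with one core bit: core 1 on socket 2 */
                printf("core %u, socket %u\n",
                       apicid & ((1u << bits) - 1), apicid >> bits);
                return 0;
        }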


@ -452,7 +452,9 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
regs->rip -= 2; regs->rip -= 2;
} }
if (regs->rax == (unsigned long)-ERESTART_RESTARTBLOCK) { if (regs->rax == (unsigned long)-ERESTART_RESTARTBLOCK) {
regs->rax = __NR_restart_syscall; regs->rax = test_thread_flag(TIF_IA32) ?
__NR_ia32_restart_syscall :
__NR_restart_syscall;
regs->rip -= 2; regs->rip -= 2;
} }
} }


@ -94,6 +94,7 @@ int smp_threads_ready;
cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned; cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned; cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_core_map);
/* /*
* Trampoline 80x86 program as an array. * Trampoline 80x86 program as an array.


@ -133,7 +133,7 @@ ioremap_change_attr(unsigned long phys_addr, unsigned long size,
unsigned long flags) unsigned long flags)
{ {
int err = 0; int err = 0;
if (flags && phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) { if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
unsigned long vaddr = (unsigned long) __va(phys_addr); unsigned long vaddr = (unsigned long) __va(phys_addr);
@ -214,7 +214,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr)); remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
return NULL; return NULL;
} }
if (ioremap_change_attr(phys_addr, size, flags) < 0) { if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
area->flags &= 0xffffff; area->flags &= 0xffffff;
vunmap(addr); vunmap(addr);
return NULL; return NULL;
@ -251,7 +251,7 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
void iounmap(volatile void __iomem *addr) void iounmap(volatile void __iomem *addr)
{ {
struct vm_struct *p, **pprev; struct vm_struct *p;
if (addr <= high_memory) if (addr <= high_memory)
return; return;
@ -260,24 +260,11 @@ void iounmap(volatile void __iomem *addr)
return; return;
write_lock(&vmlist_lock); write_lock(&vmlist_lock);
for (p = vmlist, pprev = &vmlist; p != NULL; pprev = &p->next, p = *pprev) p = __remove_vm_area((void *)((unsigned long)addr & PAGE_MASK));
if (p->addr == (void *)(PAGE_MASK & (unsigned long)addr)) if (!p)
break; printk("iounmap: bad address %p\n", addr);
if (!p) { else if (p->flags >> 20)
printk("__iounmap: bad address %p\n", addr); ioremap_change_attr(p->phys_addr, p->size, 0);
goto out_unlock;
}
*pprev = p->next;
unmap_vm_area(p);
if ((p->flags >> 20) &&
p->phys_addr + p->size - 1 < virt_to_phys(high_memory)) {
/* p->size includes the guard page, but cpa doesn't like that */
change_page_attr_addr((unsigned long)__va(p->phys_addr),
p->size >> PAGE_SHIFT,
PAGE_KERNEL);
global_flush_tlb();
}
out_unlock:
write_unlock(&vmlist_lock); write_unlock(&vmlist_lock);
kfree(p); kfree(p);
} }


@ -2021,7 +2021,13 @@ static int pkt_open(struct inode *inode, struct file *file)
BUG_ON(pd->refcnt < 0); BUG_ON(pd->refcnt < 0);
pd->refcnt++; pd->refcnt++;
if (pd->refcnt == 1) { if (pd->refcnt > 1) {
if ((file->f_mode & FMODE_WRITE) &&
!test_bit(PACKET_WRITABLE, &pd->flags)) {
ret = -EBUSY;
goto out_dec;
}
} else {
if (pkt_open_dev(pd, file->f_mode & FMODE_WRITE)) { if (pkt_open_dev(pd, file->f_mode & FMODE_WRITE)) {
ret = -EIO; ret = -EIO;
goto out_dec; goto out_dec;
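
The reworked pkt_open() above makes the first opener decide whether the device is set up writable; later opens for write are refused with -EBUSY when it was not. A toy model of that gating (the names and return convention are illustrative, not the pktcdvd code):

        #include <stdio.h>

        static int refcnt, writable;

        static int toy_open(int want_write)
        {
                if (++refcnt > 1)                       /* not the first opener */
                        return (want_write && !writable) ? -16 /* -EBUSY */ : 0;
                writable = want_write;                  /* first open sets the mode */
                return 0;
        }

        int main(void)
        {
                printf("%d %d\n", toy_open(0), toy_open(1));    /* prints: 0 -16 */
                return 0;
        }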


@ -44,6 +44,7 @@
#include <linux/ipmi.h> #include <linux/ipmi.h>
#include <asm/semaphore.h> #include <asm/semaphore.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/device.h>
#define IPMI_DEVINTF_VERSION "v33" #define IPMI_DEVINTF_VERSION "v33"
@ -519,15 +520,21 @@ MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By"
" interface. Other values will set the major device number" " interface. Other values will set the major device number"
" to that value."); " to that value.");
static struct class *ipmi_class;
static void ipmi_new_smi(int if_num) static void ipmi_new_smi(int if_num)
{ {
devfs_mk_cdev(MKDEV(ipmi_major, if_num), dev_t dev = MKDEV(ipmi_major, if_num);
S_IFCHR | S_IRUSR | S_IWUSR,
devfs_mk_cdev(dev, S_IFCHR | S_IRUSR | S_IWUSR,
"ipmidev/%d", if_num); "ipmidev/%d", if_num);
class_simple_device_add(ipmi_class, dev, NULL, "ipmi%d", if_num);
} }
static void ipmi_smi_gone(int if_num) static void ipmi_smi_gone(int if_num)
{ {
class_simple_device_remove(ipmi_class, MKDEV(ipmi_major, if_num));
devfs_remove("ipmidev/%d", if_num); devfs_remove("ipmidev/%d", if_num);
} }
@ -548,8 +555,15 @@ static __init int init_ipmi_devintf(void)
printk(KERN_INFO "ipmi device interface version " printk(KERN_INFO "ipmi device interface version "
IPMI_DEVINTF_VERSION "\n"); IPMI_DEVINTF_VERSION "\n");
ipmi_class = class_simple_create(THIS_MODULE, "ipmi");
if (IS_ERR(ipmi_class)) {
printk(KERN_ERR "ipmi: can't register device class\n");
return PTR_ERR(ipmi_class);
}
rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops); rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
if (rv < 0) { if (rv < 0) {
class_simple_destroy(ipmi_class);
printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major); printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
return rv; return rv;
} }
@ -563,6 +577,7 @@ static __init int init_ipmi_devintf(void)
rv = ipmi_smi_watcher_register(&smi_watcher); rv = ipmi_smi_watcher_register(&smi_watcher);
if (rv) { if (rv) {
unregister_chrdev(ipmi_major, DEVICE_NAME); unregister_chrdev(ipmi_major, DEVICE_NAME);
class_simple_destroy(ipmi_class);
printk(KERN_WARNING "ipmi: can't register smi watcher\n"); printk(KERN_WARNING "ipmi: can't register smi watcher\n");
return rv; return rv;
} }
@ -573,6 +588,7 @@ module_init(init_ipmi_devintf);
static __exit void cleanup_ipmi(void) static __exit void cleanup_ipmi(void)
{ {
class_simple_destroy(ipmi_class);
ipmi_smi_watcher_unregister(&smi_watcher); ipmi_smi_watcher_unregister(&smi_watcher);
devfs_remove(DEVICE_NAME); devfs_remove(DEVICE_NAME);
unregister_chrdev(ipmi_major, DEVICE_NAME); unregister_chrdev(ipmi_major, DEVICE_NAME);
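
The ipmi hunk above threads a class_simple object through init and teardown; the part worth internalizing is the ordering, with every error path undoing exactly the steps that already succeeded, in reverse. A skeleton of that shape (the helpers are hypothetical stand-ins, not the ipmi functions):

        int create_class(void); int register_chrdev_(void); int register_watcher(void);
        void destroy_class(void); void unregister_chrdev_(void);

        int example_init(void)
        {
                if (create_class() < 0)
                        return -1;
                if (register_chrdev_() < 0)
                        goto out_class;
                if (register_watcher() < 0)
                        goto out_chrdev;
                return 0;               /* cleanup later runs the same list backwards */

        out_chrdev:
                unregister_chrdev_();
        out_class:
                destroy_class();
                return -1;
        }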


@ -516,6 +516,11 @@ create_iface(struct device_node *np, struct device *dev)
u32 *psteps, *prate; u32 *psteps, *prate;
int rc; int rc;
if (np->n_intrs < 1 || np->n_addrs < 1) {
printk(KERN_ERR "%s: Missing interrupt or address !\n",
np->full_name);
return -ENODEV;
}
if (pmac_low_i2c_lock(np)) if (pmac_low_i2c_lock(np))
return -ENODEV; return -ENODEV;


@ -383,7 +383,10 @@ static int mmc_blk_probe(struct mmc_card *card)
struct mmc_blk_data *md; struct mmc_blk_data *md;
int err; int err;
if (card->csd.cmdclass & ~0x1ff) /*
* Check that the card supports the command class(es) we need.
*/
if (!(card->csd.cmdclass & CCC_BLOCK_READ))
return -ENODEV; return -ENODEV;
if (card->csd.read_blkbits < 9) { if (card->csd.read_blkbits < 9) {
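
The mmc_blk_probe() change above is easy to misread: the old "cmdclass & ~0x1ff" refused any card advertising a command class above 8, while the new test only demands the one class the driver actually needs. Assuming the usual encoding (one bit per MMC command class, block read being class 2), a worked comparison:

        #include <stdio.h>

        #define CCC_BLOCK_READ (1u << 2)        /* class 2: block read (assumed) */

        int main(void)
        {
                unsigned cmdclass = 0x5f5;      /* readable card that also has class 10 */

                printf("old check rejects: %d\n", !!(cmdclass & ~0x1ffu));      /* 1 */
                printf("new check rejects: %d\n", !(cmdclass & CCC_BLOCK_READ)); /* 0 */
                return 0;
        }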


@ -420,7 +420,8 @@ static void tg3_enable_ints(struct tg3 *tp)
{ {
tw32(TG3PCI_MISC_HOST_CTRL, tw32(TG3PCI_MISC_HOST_CTRL,
(tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000); tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
(tp->last_tag << 24));
tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW); tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
tg3_cond_int(tp); tg3_cond_int(tp);
@ -455,10 +456,16 @@ static void tg3_restart_ints(struct tg3 *tp)
{ {
tw32(TG3PCI_MISC_HOST_CTRL, tw32(TG3PCI_MISC_HOST_CTRL,
(tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000); tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
tp->last_tag << 24);
mmiowb(); mmiowb();
if (tg3_has_work(tp)) /* When doing tagged status, this work check is unnecessary.
* The last_tag we write above tells the chip which piece of
* work we've completed.
*/
if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
tg3_has_work(tp))
tw32(HOSTCC_MODE, tp->coalesce_mode | tw32(HOSTCC_MODE, tp->coalesce_mode |
(HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW)); (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
} }
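
The mailbox writes above are the heart of the tagged-status scheme this patch introduces: instead of acking with zero, the driver echoes the tag of the last status block it processed (shifted into bits 24-31 of the interrupt mailbox). The chip compares that against the tag of the newest status block it has posted and re-interrupts only when the driver is behind, which is why the explicit tg3_has_work() recheck becomes unnecessary on this path. A toy model of the comparison, not the hardware protocol itself:

        #include <stdio.h>

        static unsigned chip_tag = 3, acked_tag;        /* toy chip state */

        static int chip_reinterrupts(void) { return acked_tag != chip_tag; }

        int main(void)
        {
                acked_tag = 3;                          /* driver: last_tag << 24 */
                printf("%d\n", chip_reinterrupts());    /* 0: fully acknowledged */
                chip_tag = 4;                           /* new work arrived meanwhile */
                printf("%d\n", chip_reinterrupts());    /* 1: chip fires again */
                return 0;
        }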
@ -2500,7 +2507,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
if (netif_carrier_ok(tp->dev)) { if (netif_carrier_ok(tp->dev)) {
tw32(HOSTCC_STAT_COAL_TICKS, tw32(HOSTCC_STAT_COAL_TICKS,
DEFAULT_STAT_COAL_TICKS); tp->coal.stats_block_coalesce_usecs);
} else { } else {
tw32(HOSTCC_STAT_COAL_TICKS, 0); tw32(HOSTCC_STAT_COAL_TICKS, 0);
} }
@ -2886,7 +2893,6 @@ static int tg3_poll(struct net_device *netdev, int *budget)
* All RX "locking" is done by ensuring outside * All RX "locking" is done by ensuring outside
* code synchronizes with dev->poll() * code synchronizes with dev->poll()
*/ */
done = 1;
if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) { if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
int orig_budget = *budget; int orig_budget = *budget;
int work_done; int work_done;
@ -2898,12 +2904,14 @@ static int tg3_poll(struct net_device *netdev, int *budget)
*budget -= work_done; *budget -= work_done;
netdev->quota -= work_done; netdev->quota -= work_done;
if (work_done >= orig_budget)
done = 0;
} }
if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
tp->last_tag = sblk->status_tag;
rmb();
/* if no more work, tell net stack and NIC we're done */ /* if no more work, tell net stack and NIC we're done */
done = !tg3_has_work(tp);
if (done) { if (done) {
spin_lock_irqsave(&tp->lock, flags); spin_lock_irqsave(&tp->lock, flags);
__netif_rx_complete(netdev); __netif_rx_complete(netdev);
@ -2928,22 +2936,21 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
spin_lock_irqsave(&tp->lock, flags); spin_lock_irqsave(&tp->lock, flags);
/* /*
* writing any value to intr-mbox-0 clears PCI INTA# and * Writing any value to intr-mbox-0 clears PCI INTA# and
* chip-internal interrupt pending events. * chip-internal interrupt pending events.
* writing non-zero to intr-mbox-0 additional tells the * Writing non-zero to intr-mbox-0 additional tells the
* NIC to stop sending us irqs, engaging "in-intr-handler" * NIC to stop sending us irqs, engaging "in-intr-handler"
* event coalescing. * event coalescing.
*/ */
tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
tp->last_tag = sblk->status_tag;
sblk->status &= ~SD_STATUS_UPDATED; sblk->status &= ~SD_STATUS_UPDATED;
if (likely(tg3_has_work(tp))) if (likely(tg3_has_work(tp)))
netif_rx_schedule(dev); /* schedule NAPI poll */ netif_rx_schedule(dev); /* schedule NAPI poll */
else { else {
/* no work, re-enable interrupts /* No work, re-enable interrupts. */
*/
tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
0x00000000); tp->last_tag << 24);
} }
spin_unlock_irqrestore(&tp->lock, flags); spin_unlock_irqrestore(&tp->lock, flags);
@ -2961,6 +2968,52 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
spin_lock_irqsave(&tp->lock, flags); spin_lock_irqsave(&tp->lock, flags);
/* In INTx mode, it is possible for the interrupt to arrive at
* the CPU before the status block posted prior to the interrupt.
* Reading the PCI State register will confirm whether the
* interrupt is ours and will flush the status block.
*/
if ((sblk->status & SD_STATUS_UPDATED) ||
!(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
/*
* Writing any value to intr-mbox-0 clears PCI INTA# and
* chip-internal interrupt pending events.
* Writing non-zero to intr-mbox-0 additional tells the
* NIC to stop sending us irqs, engaging "in-intr-handler"
* event coalescing.
*/
tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
0x00000001);
sblk->status &= ~SD_STATUS_UPDATED;
if (likely(tg3_has_work(tp)))
netif_rx_schedule(dev); /* schedule NAPI poll */
else {
/* No work, shared interrupt perhaps? re-enable
* interrupts, and flush that PCI write
*/
tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
0x00000000);
tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
}
} else { /* shared interrupt */
handled = 0;
}
spin_unlock_irqrestore(&tp->lock, flags);
return IRQ_RETVAL(handled);
}
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
{
struct net_device *dev = dev_id;
struct tg3 *tp = netdev_priv(dev);
struct tg3_hw_status *sblk = tp->hw_status;
unsigned long flags;
unsigned int handled = 1;
spin_lock_irqsave(&tp->lock, flags);
/* In INTx mode, it is possible for the interrupt to arrive at /* In INTx mode, it is possible for the interrupt to arrive at
* the CPU before the status block posted prior to the interrupt. * the CPU before the status block posted prior to the interrupt.
* Reading the PCI State register will confirm whether the * Reading the PCI State register will confirm whether the
@ -2977,13 +3030,8 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
*/ */
tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
0x00000001); 0x00000001);
/* tp->last_tag = sblk->status_tag;
* Flush PCI write. This also guarantees that our
* status block has been flushed to host memory.
*/
tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
sblk->status &= ~SD_STATUS_UPDATED; sblk->status &= ~SD_STATUS_UPDATED;
if (likely(tg3_has_work(tp))) if (likely(tg3_has_work(tp)))
netif_rx_schedule(dev); /* schedule NAPI poll */ netif_rx_schedule(dev); /* schedule NAPI poll */
else { else {
@ -2991,7 +3039,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
* interrupts, and flush that PCI write * interrupts, and flush that PCI write
*/ */
tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
0x00000000); tp->last_tag << 24);
tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW); tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
} }
} else { /* shared interrupt */ } else { /* shared interrupt */
@ -5044,6 +5092,27 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
} }
static void __tg3_set_rx_mode(struct net_device *); static void __tg3_set_rx_mode(struct net_device *);
static void tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
}
tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
u32 val = ec->stats_block_coalesce_usecs;
if (!netif_carrier_ok(tp->dev))
val = 0;
tw32(HOSTCC_STAT_COAL_TICKS, val);
}
}
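
tg3_set_coalesce() above pushes an ethtool_coalesce structure into the HOSTCC registers, and the .get_coalesce hook added further down hands the same structure back to userspace. For orientation, a minimal userspace sketch of how that hook is reached through the SIOCETHTOOL ioctl ("eth0" and the omitted error handling are placeholders):

        #include <stdio.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <sys/socket.h>
        #include <net/if.h>
        #include <linux/ethtool.h>
        #include <linux/sockios.h>

        int main(void)
        {
                struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
                struct ifreq ifr;
                int fd = socket(AF_INET, SOCK_DGRAM, 0);

                memset(&ifr, 0, sizeof(ifr));
                strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
                ifr.ifr_data = (void *)&ec;             /* driver fills in tp->coal */
                if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                        printf("rx_coalesce_usecs = %u\n", ec.rx_coalesce_usecs);
                return 0;
        }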
/* tp->lock is held. */ /* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp) static int tg3_reset_hw(struct tg3 *tp)
@ -5366,16 +5435,7 @@ static int tg3_reset_hw(struct tg3 *tp)
udelay(10); udelay(10);
} }
tw32(HOSTCC_RXCOL_TICKS, 0); tg3_set_coalesce(tp, &tp->coal);
tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
tw32(HOSTCC_RXMAX_FRAMES, 1);
tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
tw32(HOSTCC_RXCOAL_TICK_INT, 0);
tw32(HOSTCC_TXCOAL_TICK_INT, 0);
}
tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
/* set status block DMA address */ /* set status block DMA address */
tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
@ -5388,8 +5448,6 @@ static int tg3_reset_hw(struct tg3 *tp)
* the tg3_periodic_fetch_stats call there, and * the tg3_periodic_fetch_stats call there, and
* tg3_get_stats to see how this works for 5705/5750 chips. * tg3_get_stats to see how this works for 5705/5750 chips.
*/ */
tw32(HOSTCC_STAT_COAL_TICKS,
DEFAULT_STAT_COAL_TICKS);
tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
((u64) tp->stats_mapping >> 32)); ((u64) tp->stats_mapping >> 32));
tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
@ -5445,7 +5503,8 @@ static int tg3_reset_hw(struct tg3 *tp)
udelay(100); udelay(100);
tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0); tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
tr32(MAILBOX_INTERRUPT_0); tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
tp->last_tag = 0;
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
@ -5723,31 +5782,33 @@ static void tg3_timer(unsigned long __opaque)
spin_lock_irqsave(&tp->lock, flags); spin_lock_irqsave(&tp->lock, flags);
spin_lock(&tp->tx_lock); spin_lock(&tp->tx_lock);
/* All of this garbage is because when using non-tagged if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
* IRQ status the mailbox/status_block protocol the chip /* All of this garbage is because when using non-tagged
* uses with the cpu is race prone. * IRQ status the mailbox/status_block protocol the chip
*/ * uses with the cpu is race prone.
if (tp->hw_status->status & SD_STATUS_UPDATED) { */
tw32(GRC_LOCAL_CTRL, if (tp->hw_status->status & SD_STATUS_UPDATED) {
tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); tw32(GRC_LOCAL_CTRL,
} else { tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
tw32(HOSTCC_MODE, tp->coalesce_mode | } else {
(HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW)); tw32(HOSTCC_MODE, tp->coalesce_mode |
} (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER; tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
spin_unlock(&tp->tx_lock); spin_unlock(&tp->tx_lock);
spin_unlock_irqrestore(&tp->lock, flags); spin_unlock_irqrestore(&tp->lock, flags);
schedule_work(&tp->reset_task); schedule_work(&tp->reset_task);
return; return;
}
} }
if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
tg3_periodic_fetch_stats(tp);
/* This part only runs once per second. */ /* This part only runs once per second. */
if (!--tp->timer_counter) { if (!--tp->timer_counter) {
if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
tg3_periodic_fetch_stats(tp);
if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
u32 mac_stat; u32 mac_stat;
int phy_event; int phy_event;
@ -5846,9 +5907,13 @@ static int tg3_test_interrupt(struct tg3 *tp)
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
err = request_irq(tp->pdev->irq, tg3_msi, err = request_irq(tp->pdev->irq, tg3_msi,
SA_SAMPLE_RANDOM, dev->name, dev); SA_SAMPLE_RANDOM, dev->name, dev);
else else {
err = request_irq(tp->pdev->irq, tg3_interrupt, irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
fn = tg3_interrupt_tagged;
err = request_irq(tp->pdev->irq, fn,
SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev); SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
}
if (err) if (err)
return err; return err;
@ -5900,9 +5965,14 @@ static int tg3_test_msi(struct tg3 *tp)
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
err = request_irq(tp->pdev->irq, tg3_interrupt, {
SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev); irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
fn = tg3_interrupt_tagged;
err = request_irq(tp->pdev->irq, fn,
SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
}
if (err) if (err)
return err; return err;
@ -5948,7 +6018,13 @@ static int tg3_open(struct net_device *dev)
if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
(GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) && (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
(GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) { (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
if (pci_enable_msi(tp->pdev) == 0) { /* All MSI supporting chips should support tagged
* status. Assert that this is the case.
*/
if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
"Not using MSI.\n", tp->dev->name);
} else if (pci_enable_msi(tp->pdev) == 0) {
u32 msi_mode; u32 msi_mode;
msi_mode = tr32(MSGINT_MODE); msi_mode = tr32(MSGINT_MODE);
@ -5959,9 +6035,14 @@ static int tg3_open(struct net_device *dev)
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
err = request_irq(tp->pdev->irq, tg3_msi, err = request_irq(tp->pdev->irq, tg3_msi,
SA_SAMPLE_RANDOM, dev->name, dev); SA_SAMPLE_RANDOM, dev->name, dev);
else else {
err = request_irq(tp->pdev->irq, tg3_interrupt, irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
fn = tg3_interrupt_tagged;
err = request_irq(tp->pdev->irq, fn,
SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev); SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
}
if (err) { if (err) {
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
@ -5980,9 +6061,16 @@ static int tg3_open(struct net_device *dev)
tg3_halt(tp, 1); tg3_halt(tp, 1);
tg3_free_rings(tp); tg3_free_rings(tp);
} else { } else {
tp->timer_offset = HZ / 10; if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
tp->timer_counter = tp->timer_multiplier = 10; tp->timer_offset = HZ;
tp->asf_counter = tp->asf_multiplier = (10 * 120); else
tp->timer_offset = HZ / 10;
BUG_ON(tp->timer_offset > HZ);
tp->timer_counter = tp->timer_multiplier =
(HZ / tp->timer_offset);
tp->asf_counter = tp->asf_multiplier =
((HZ / tp->timer_offset) * 120);
init_timer(&tp->timer); init_timer(&tp->timer);
tp->timer.expires = jiffies + tp->timer_offset; tp->timer.expires = jiffies + tp->timer_offset;
@ -6005,6 +6093,7 @@ static int tg3_open(struct net_device *dev)
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
err = tg3_test_msi(tp); err = tg3_test_msi(tp);
if (err) { if (err) {
spin_lock_irq(&tp->lock); spin_lock_irq(&tp->lock);
spin_lock(&tp->tx_lock); spin_lock(&tp->tx_lock);
@ -7203,6 +7292,14 @@ static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
} }
#endif #endif
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
struct tg3 *tp = netdev_priv(dev);
memcpy(ec, &tp->coal, sizeof(*ec));
return 0;
}
static struct ethtool_ops tg3_ethtool_ops = { static struct ethtool_ops tg3_ethtool_ops = {
.get_settings = tg3_get_settings, .get_settings = tg3_get_settings,
.set_settings = tg3_set_settings, .set_settings = tg3_set_settings,
@ -7235,6 +7332,7 @@ static struct ethtool_ops tg3_ethtool_ops = {
.get_strings = tg3_get_strings, .get_strings = tg3_get_strings,
.get_stats_count = tg3_get_stats_count, .get_stats_count = tg3_get_stats_count,
.get_ethtool_stats = tg3_get_ethtool_stats, .get_ethtool_stats = tg3_get_ethtool_stats,
.get_coalesce = tg3_get_coalesce,
}; };
static void __devinit tg3_get_eeprom_size(struct tg3 *tp) static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
@ -8422,15 +8520,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
/* Only 5701 and later support tagged irq status mode.
* Also, 5788 chips cannot use tagged irq status.
*
* However, since we are using NAPI avoid tagged irq status
* because the interrupt condition is more difficult to
* fully clear in that mode.
*/
tp->coalesce_mode = 0; tp->coalesce_mode = 0;
if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
tp->coalesce_mode |= HOSTCC_MODE_32BYTE; tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
@ -8494,6 +8584,18 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
tp->tg3_flags2 |= TG3_FLG2_IS_5788; tp->tg3_flags2 |= TG3_FLG2_IS_5788;
if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
(GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
HOSTCC_MODE_CLRTICK_TXBD);
tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
tp->misc_host_ctrl);
}
/* these are limited to 10/100 only */ /* these are limited to 10/100 only */
if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
(grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
@ -8671,6 +8773,146 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
return 0; return 0;
} }
#define BOUNDARY_SINGLE_CACHELINE 1
#define BOUNDARY_MULTI_CACHELINE 2
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
int cacheline_size;
u8 byte;
int goal;
pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
if (byte == 0)
cacheline_size = 1024;
else
cacheline_size = (int) byte * 4;
/* On 5703 and later chips, the boundary bits have no
* effect.
*/
if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
goto out;
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
goal = BOUNDARY_SINGLE_CACHELINE;
#else
goal = 0;
#endif
#endif
if (!goal)
goto out;
/* PCI controllers on most RISC systems tend to disconnect
* when a device tries to burst across a cache-line boundary.
* Therefore, letting tg3 do so just wastes PCI bandwidth.
*
* Unfortunately, for PCI-E there are only limited
* write-side controls for this, and thus for reads
* we will still get the disconnects. We'll also waste
* these PCI cycles for both read and write for chips
* other than 5700 and 5701 which do not implement the
* boundary bits.
*/
if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
switch (cacheline_size) {
case 16:
case 32:
case 64:
case 128:
if (goal == BOUNDARY_SINGLE_CACHELINE) {
val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
} else {
val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
}
break;
case 256:
val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
break;
default:
val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
break;
};
} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
switch (cacheline_size) {
case 16:
case 32:
case 64:
if (goal == BOUNDARY_SINGLE_CACHELINE) {
val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
break;
}
/* fallthrough */
case 128:
default:
val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
break;
};
} else {
switch (cacheline_size) {
case 16:
if (goal == BOUNDARY_SINGLE_CACHELINE) {
val |= (DMA_RWCTRL_READ_BNDRY_16 |
DMA_RWCTRL_WRITE_BNDRY_16);
break;
}
/* fallthrough */
case 32:
if (goal == BOUNDARY_SINGLE_CACHELINE) {
val |= (DMA_RWCTRL_READ_BNDRY_32 |
DMA_RWCTRL_WRITE_BNDRY_32);
break;
}
/* fallthrough */
case 64:
if (goal == BOUNDARY_SINGLE_CACHELINE) {
val |= (DMA_RWCTRL_READ_BNDRY_64 |
DMA_RWCTRL_WRITE_BNDRY_64);
break;
}
/* fallthrough */
case 128:
if (goal == BOUNDARY_SINGLE_CACHELINE) {
val |= (DMA_RWCTRL_READ_BNDRY_128 |
DMA_RWCTRL_WRITE_BNDRY_128);
break;
}
/* fallthrough */
case 256:
val |= (DMA_RWCTRL_READ_BNDRY_256 |
DMA_RWCTRL_WRITE_BNDRY_256);
break;
case 512:
val |= (DMA_RWCTRL_READ_BNDRY_512 |
DMA_RWCTRL_WRITE_BNDRY_512);
break;
case 1024:
default:
val |= (DMA_RWCTRL_READ_BNDRY_1024 |
DMA_RWCTRL_WRITE_BNDRY_1024);
break;
};
}
out:
return val;
}
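
tg3_calc_dma_bndry() above starts from the PCI cache line size register, and the "* 4" is the part that trips people up: the config-space byte counts 32-bit words, not bytes, with 0 meaning the size was never programmed (the code then falls back to 1024). A two-line illustration:

        #include <stdio.h>

        int main(void)
        {
                unsigned char byte = 0x10;      /* PCI_CACHE_LINE_SIZE counts dwords */
                int cacheline_size = byte ? (int)byte * 4 : 1024;

                printf("cache line: %d bytes\n", cacheline_size);       /* 64 bytes */
                return 0;
        }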
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device) static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{ {
struct tg3_internal_buffer_desc test_desc; struct tg3_internal_buffer_desc test_desc;
@ -8757,7 +8999,7 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
static int __devinit tg3_test_dma(struct tg3 *tp) static int __devinit tg3_test_dma(struct tg3 *tp)
{ {
dma_addr_t buf_dma; dma_addr_t buf_dma;
u32 *buf; u32 *buf, saved_dma_rwctrl;
int ret; int ret;
buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
@ -8769,46 +9011,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
(0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
#ifndef CONFIG_X86 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
{
u8 byte;
int cacheline_size;
pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
if (byte == 0)
cacheline_size = 1024;
else
cacheline_size = (int) byte * 4;
switch (cacheline_size) {
case 16:
case 32:
case 64:
case 128:
if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
tp->dma_rwctrl |=
DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
break;
} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
tp->dma_rwctrl &=
~(DMA_RWCTRL_PCI_WRITE_CMD);
tp->dma_rwctrl |=
DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
break;
}
/* fallthrough */
case 256:
if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
tp->dma_rwctrl |=
DMA_RWCTRL_WRITE_BNDRY_256;
else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
tp->dma_rwctrl |=
DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
};
}
#endif
if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
/* DMA read watermark not used on PCIE */ /* DMA read watermark not used on PCIE */
@ -8827,7 +9030,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
if (ccval == 0x6 || ccval == 0x7) if (ccval == 0x6 || ccval == 0x7)
tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
/* Set bit 23 to renable PCIX hw bug fix */ /* Set bit 23 to enable PCIX hw bug fix */
tp->dma_rwctrl |= 0x009f0000; tp->dma_rwctrl |= 0x009f0000;
} else { } else {
tp->dma_rwctrl |= 0x001b000f; tp->dma_rwctrl |= 0x001b000f;
@ -8868,6 +9071,13 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
goto out; goto out;
/* It is best to perform DMA test with maximum write burst size
* to expose the 5700/5701 write DMA bug.
*/
saved_dma_rwctrl = tp->dma_rwctrl;
tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
while (1) { while (1) {
u32 *p = buf, i; u32 *p = buf, i;
@ -8906,8 +9116,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
if (p[i] == i) if (p[i] == i)
continue; continue;
if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) == if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
DMA_RWCTRL_WRITE_BNDRY_DISAB) { DMA_RWCTRL_WRITE_BNDRY_16) {
tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
break; break;
@ -8924,6 +9135,14 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
break; break;
} }
} }
if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
DMA_RWCTRL_WRITE_BNDRY_16) {
/* DMA test passed without adjusting DMA boundary,
* just restore the calculated DMA boundary
*/
tp->dma_rwctrl = saved_dma_rwctrl;
tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
}
out: out:
pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma); pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
@ -9011,6 +9230,31 @@ static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
return peer; return peer;
} }
static void __devinit tg3_init_coal(struct tg3 *tp)
{
struct ethtool_coalesce *ec = &tp->coal;
memset(ec, 0, sizeof(*ec));
ec->cmd = ETHTOOL_GCOALESCE;
ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
HOSTCC_MODE_CLRTICK_TXBD)) {
ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
}
}
static int __devinit tg3_init_one(struct pci_dev *pdev, static int __devinit tg3_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent) const struct pci_device_id *ent)
{ {
@ -9256,6 +9500,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
/* flow control autonegotiation is default behavior */ /* flow control autonegotiation is default behavior */
tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
tg3_init_coal(tp);
err = register_netdev(dev); err = register_netdev(dev);
if (err) { if (err) {
printk(KERN_ERR PFX "Cannot register net device, " printk(KERN_ERR PFX "Cannot register net device, "
@ -9298,6 +9544,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
(tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0, (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
(tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0, (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0); (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
dev->name, tp->dma_rwctrl);
return 0; return 0;


@ -876,10 +876,12 @@
#define HOSTCC_STATUS_ERROR_ATTN 0x00000004 #define HOSTCC_STATUS_ERROR_ATTN 0x00000004
#define HOSTCC_RXCOL_TICKS 0x00003c08 #define HOSTCC_RXCOL_TICKS 0x00003c08
#define LOW_RXCOL_TICKS 0x00000032 #define LOW_RXCOL_TICKS 0x00000032
#define LOW_RXCOL_TICKS_CLRTCKS 0x00000014
#define DEFAULT_RXCOL_TICKS 0x00000048 #define DEFAULT_RXCOL_TICKS 0x00000048
#define HIGH_RXCOL_TICKS 0x00000096 #define HIGH_RXCOL_TICKS 0x00000096
#define HOSTCC_TXCOL_TICKS 0x00003c0c #define HOSTCC_TXCOL_TICKS 0x00003c0c
#define LOW_TXCOL_TICKS 0x00000096 #define LOW_TXCOL_TICKS 0x00000096
#define LOW_TXCOL_TICKS_CLRTCKS 0x00000048
#define DEFAULT_TXCOL_TICKS 0x0000012c #define DEFAULT_TXCOL_TICKS 0x0000012c
#define HIGH_TXCOL_TICKS 0x00000145 #define HIGH_TXCOL_TICKS 0x00000145
#define HOSTCC_RXMAX_FRAMES 0x00003c10 #define HOSTCC_RXMAX_FRAMES 0x00003c10
@ -892,8 +894,10 @@
#define HIGH_TXMAX_FRAMES 0x00000052 #define HIGH_TXMAX_FRAMES 0x00000052
#define HOSTCC_RXCOAL_TICK_INT 0x00003c18 #define HOSTCC_RXCOAL_TICK_INT 0x00003c18
#define DEFAULT_RXCOAL_TICK_INT 0x00000019 #define DEFAULT_RXCOAL_TICK_INT 0x00000019
#define DEFAULT_RXCOAL_TICK_INT_CLRTCKS 0x00000014
#define HOSTCC_TXCOAL_TICK_INT 0x00003c1c #define HOSTCC_TXCOAL_TICK_INT 0x00003c1c
#define DEFAULT_TXCOAL_TICK_INT 0x00000019 #define DEFAULT_TXCOAL_TICK_INT 0x00000019
#define DEFAULT_TXCOAL_TICK_INT_CLRTCKS 0x00000014
#define HOSTCC_RXCOAL_MAXF_INT 0x00003c20 #define HOSTCC_RXCOAL_MAXF_INT 0x00003c20
#define DEFAULT_RXCOAL_MAXF_INT 0x00000005 #define DEFAULT_RXCOAL_MAXF_INT 0x00000005
#define HOSTCC_TXCOAL_MAXF_INT 0x00003c24 #define HOSTCC_TXCOAL_MAXF_INT 0x00003c24
@ -2023,6 +2027,7 @@ struct tg3 {
struct tg3_hw_status *hw_status; struct tg3_hw_status *hw_status;
dma_addr_t status_mapping; dma_addr_t status_mapping;
u32 last_tag;
u32 msg_enable; u32 msg_enable;
@ -2068,6 +2073,7 @@ struct tg3 {
u32 rx_offset; u32 rx_offset;
u32 tg3_flags; u32 tg3_flags;
#define TG3_FLAG_TAGGED_STATUS 0x00000001
#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002 #define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002
#define TG3_FLAG_RX_CHECKSUMS 0x00000004 #define TG3_FLAG_RX_CHECKSUMS 0x00000004
#define TG3_FLAG_USE_LINKCHG_REG 0x00000008 #define TG3_FLAG_USE_LINKCHG_REG 0x00000008
@ -2225,7 +2231,7 @@ struct tg3 {
#define SST_25VF0X0_PAGE_SIZE 4098 #define SST_25VF0X0_PAGE_SIZE 4098
struct ethtool_coalesce coal;
}; };
#endif /* !(_T3_H) */ #endif /* !(_T3_H) */


@ -81,10 +81,6 @@ unsigned char irqs[4] = {
int irqhit=0; int irqhit=0;
#endif #endif
#ifndef MIN
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#endif
static struct tty_driver *aurora_driver; static struct tty_driver *aurora_driver;
static struct Aurora_board aurora_board[AURORA_NBOARD] = { static struct Aurora_board aurora_board[AURORA_NBOARD] = {
{0,}, {0,},
@ -594,7 +590,7 @@ static void aurora_transmit(struct Aurora_board const * bp, int chip)
&bp->r[chip]->r[CD180_TDR]); &bp->r[chip]->r[CD180_TDR]);
port->COR2 &= ~COR2_ETC; port->COR2 &= ~COR2_ETC;
} }
count = MIN(port->break_length, 0xff); count = min(port->break_length, 0xff);
sbus_writeb(CD180_C_ESC, sbus_writeb(CD180_C_ESC,
&bp->r[chip]->r[CD180_TDR]); &bp->r[chip]->r[CD180_TDR]);
sbus_writeb(CD180_C_DELAY, sbus_writeb(CD180_C_DELAY,
@ -1575,7 +1571,7 @@ static int aurora_write(struct tty_struct * tty,
save_flags(flags); save_flags(flags);
while (1) { while (1) {
cli(); cli();
c = MIN(count, MIN(SERIAL_XMIT_SIZE - port->xmit_cnt - 1, c = min(count, min(SERIAL_XMIT_SIZE - port->xmit_cnt - 1,
SERIAL_XMIT_SIZE - port->xmit_head)); SERIAL_XMIT_SIZE - port->xmit_head));
if (c <= 0) { if (c <= 0) {
restore_flags(flags); restore_flags(flags);


@ -41,7 +41,6 @@
#include "aic7xxx_osm.h" #include "aic7xxx_osm.h"
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
#include <linux/device.h> #include <linux/device.h>
#include <linux/eisa.h> #include <linux/eisa.h>
@ -62,13 +61,6 @@ static struct eisa_driver aic7770_driver = {
}; };
typedef struct device *aic7770_dev_t; typedef struct device *aic7770_dev_t;
#else
#define MINSLOT 1
#define NUMSLOTS 16
#define IDOFFSET 0x80
typedef void *aic7770_dev_t;
#endif
static int aic7770_linux_config(struct aic7770_identity *entry, static int aic7770_linux_config(struct aic7770_identity *entry,
aic7770_dev_t dev, u_int eisaBase); aic7770_dev_t dev, u_int eisaBase);
@ -76,7 +68,6 @@ static int aic7770_linux_config(struct aic7770_identity *entry,
int int
ahc_linux_eisa_init(void) ahc_linux_eisa_init(void)
{ {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
struct eisa_device_id *eid; struct eisa_device_id *eid;
struct aic7770_identity *id; struct aic7770_identity *id;
int i; int i;
@ -110,44 +101,6 @@ ahc_linux_eisa_init(void)
eid->sig[0] = 0; eid->sig[0] = 0;
return eisa_driver_register(&aic7770_driver); return eisa_driver_register(&aic7770_driver);
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) */
struct aic7770_identity *entry;
u_int slot;
u_int eisaBase;
u_int i;
int ret = -ENODEV;
if (aic7xxx_probe_eisa_vl == 0)
return ret;
eisaBase = 0x1000 + AHC_EISA_SLOT_OFFSET;
for (slot = 1; slot < NUMSLOTS; eisaBase+=0x1000, slot++) {
uint32_t eisa_id;
size_t id_size;
if (request_region(eisaBase, AHC_EISA_IOSIZE, "aic7xxx") == 0)
continue;
eisa_id = 0;
id_size = sizeof(eisa_id);
for (i = 0; i < 4; i++) {
/* VLcards require priming*/
outb(0x80 + i, eisaBase + IDOFFSET);
eisa_id |= inb(eisaBase + IDOFFSET + i)
<< ((id_size-i-1) * 8);
}
release_region(eisaBase, AHC_EISA_IOSIZE);
if (eisa_id & 0x80000000)
continue; /* no EISA card in slot */
entry = aic7770_find_device(eisa_id);
if (entry != NULL) {
aic7770_linux_config(entry, NULL, eisaBase);
ret = 0;
}
}
return ret;
#endif
} }
void void
@ -187,11 +140,10 @@ aic7770_linux_config(struct aic7770_identity *entry, aic7770_dev_t dev,
ahc_free(ahc); ahc_free(ahc);
return (error); return (error);
} }
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
dev->driver_data = (void *)ahc; dev->driver_data = (void *)ahc;
if (aic7xxx_detect_complete) if (aic7xxx_detect_complete)
error = ahc_linux_register_host(ahc, &aic7xxx_driver_template); error = ahc_linux_register_host(ahc, &aic7xxx_driver_template);
#endif
return (error); return (error);
} }
@ -225,7 +177,6 @@ aic7770_map_int(struct ahc_softc *ahc, u_int irq)
return (-error); return (-error);
} }
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
static int static int
aic7770_eisa_dev_probe(struct device *dev) aic7770_eisa_dev_probe(struct device *dev)
{ {
@ -261,4 +212,3 @@ aic7770_eisa_dev_remove(struct device *dev)
return (0); return (0);
} }
#endif

File diff suppressed because it is too large


@ -59,6 +59,7 @@
#ifndef _AIC7XXX_LINUX_H_ #ifndef _AIC7XXX_LINUX_H_
#define _AIC7XXX_LINUX_H_ #define _AIC7XXX_LINUX_H_
#include <linux/config.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/blkdev.h> #include <linux/blkdev.h>
#include <linux/delay.h> #include <linux/delay.h>
@ -66,18 +67,21 @@
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/smp_lock.h> #include <linux/smp_lock.h>
#include <linux/version.h> #include <linux/version.h>
#include <linux/interrupt.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/slab.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
#include <asm/io.h> #include <asm/io.h>
#include <linux/interrupt.h> /* For tasklet support. */ #include <scsi/scsi.h>
#include <linux/config.h> #include <scsi/scsi_cmnd.h>
#include <linux/slab.h> #include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
/* Core SCSI definitions */ /* Core SCSI definitions */
#define AIC_LIB_PREFIX ahc #define AIC_LIB_PREFIX ahc
#include "scsi.h"
#include <scsi/scsi_host.h>
/* Name space conflict with BSD queue macros */ /* Name space conflict with BSD queue macros */
#ifdef LIST_HEAD #ifdef LIST_HEAD
@ -106,7 +110,7 @@
/************************* Forward Declarations *******************************/ /************************* Forward Declarations *******************************/
struct ahc_softc; struct ahc_softc;
typedef struct pci_dev *ahc_dev_softc_t; typedef struct pci_dev *ahc_dev_softc_t;
typedef Scsi_Cmnd *ahc_io_ctx_t; typedef struct scsi_cmnd *ahc_io_ctx_t;
/******************************* Byte Order ***********************************/ /******************************* Byte Order ***********************************/
#define ahc_htobe16(x) cpu_to_be16(x) #define ahc_htobe16(x) cpu_to_be16(x)
@ -144,7 +148,7 @@ typedef Scsi_Cmnd *ahc_io_ctx_t;
extern u_int aic7xxx_no_probe; extern u_int aic7xxx_no_probe;
extern u_int aic7xxx_allow_memio; extern u_int aic7xxx_allow_memio;
extern int aic7xxx_detect_complete; extern int aic7xxx_detect_complete;
extern Scsi_Host_Template aic7xxx_driver_template; extern struct scsi_host_template aic7xxx_driver_template;
/***************************** Bus Space/DMA **********************************/ /***************************** Bus Space/DMA **********************************/
@ -174,11 +178,7 @@ struct ahc_linux_dma_tag
}; };
typedef struct ahc_linux_dma_tag* bus_dma_tag_t; typedef struct ahc_linux_dma_tag* bus_dma_tag_t;
struct ahc_linux_dmamap typedef dma_addr_t bus_dmamap_t;
{
dma_addr_t bus_addr;
};
typedef struct ahc_linux_dmamap* bus_dmamap_t;
typedef int bus_dma_filter_t(void*, dma_addr_t); typedef int bus_dma_filter_t(void*, dma_addr_t);
typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int); typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int);
@ -281,12 +281,6 @@ ahc_scb_timer_reset(struct scb *scb, u_int usec)
/***************************** SMP support ************************************/ /***************************** SMP support ************************************/
#include <linux/spinlock.h> #include <linux/spinlock.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) || defined(SCSI_HAS_HOST_LOCK))
#define AHC_SCSI_HAS_HOST_LOCK 1
#else
#define AHC_SCSI_HAS_HOST_LOCK 0
#endif
#define AIC7XXX_DRIVER_VERSION "6.2.36" #define AIC7XXX_DRIVER_VERSION "6.2.36"
/**************************** Front End Queues ********************************/ /**************************** Front End Queues ********************************/
@ -328,20 +322,15 @@ struct ahc_cmd {
*/ */
TAILQ_HEAD(ahc_busyq, ahc_cmd); TAILQ_HEAD(ahc_busyq, ahc_cmd);
typedef enum { typedef enum {
AHC_DEV_UNCONFIGURED = 0x01,
AHC_DEV_FREEZE_TIL_EMPTY = 0x02, /* Freeze queue until active == 0 */ AHC_DEV_FREEZE_TIL_EMPTY = 0x02, /* Freeze queue until active == 0 */
AHC_DEV_TIMER_ACTIVE = 0x04, /* Our timer is active */
AHC_DEV_ON_RUN_LIST = 0x08, /* Queued to be run later */
AHC_DEV_Q_BASIC = 0x10, /* Allow basic device queuing */ AHC_DEV_Q_BASIC = 0x10, /* Allow basic device queuing */
AHC_DEV_Q_TAGGED = 0x20, /* Allow full SCSI2 command queueing */ AHC_DEV_Q_TAGGED = 0x20, /* Allow full SCSI2 command queueing */
AHC_DEV_PERIODIC_OTAG = 0x40, /* Send OTAG to prevent starvation */ AHC_DEV_PERIODIC_OTAG = 0x40, /* Send OTAG to prevent starvation */
AHC_DEV_SLAVE_CONFIGURED = 0x80 /* slave_configure() has been called */
} ahc_linux_dev_flags; } ahc_linux_dev_flags;
struct ahc_linux_target; struct ahc_linux_target;
struct ahc_linux_device { struct ahc_linux_device {
TAILQ_ENTRY(ahc_linux_device) links; TAILQ_ENTRY(ahc_linux_device) links;
struct ahc_busyq busyq;
/* /*
* The number of transactions currently * The number of transactions currently
@ -381,11 +370,6 @@ struct ahc_linux_device {
ahc_linux_dev_flags flags; ahc_linux_dev_flags flags;
/*
* Per device timer.
*/
struct timer_list timer;
/* /*
* The high limit for the tags variable. * The high limit for the tags variable.
*/ */
@ -419,7 +403,7 @@ struct ahc_linux_device {
#define AHC_OTAG_THRESH 500 #define AHC_OTAG_THRESH 500
int lun; int lun;
Scsi_Device *scsi_device; struct scsi_device *scsi_device;
struct ahc_linux_target *target; struct ahc_linux_target *target;
}; };
@ -439,32 +423,16 @@ struct ahc_linux_target {
  * manner and are allocated below 4GB, the number of S/G segments is
  * unrestricted.
  */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-/*
- * We dynamically adjust the number of segments in pre-2.5 kernels to
- * avoid fragmentation issues in the SCSI mid-layer's private memory
- * allocator.  See aic7xxx_osm.c ahc_linux_size_nseg() for details.
- */
-extern u_int ahc_linux_nseg;
-#define AHC_NSEG ahc_linux_nseg
-#define AHC_LINUX_MIN_NSEG 64
-#else
 #define AHC_NSEG 128
-#endif
 
 /*
  * Per-SCB OSM storage.
  */
-typedef enum {
-	AHC_UP_EH_SEMAPHORE = 0x1
-} ahc_linux_scb_flags;
-
 struct scb_platform_data {
 	struct ahc_linux_device *dev;
 	dma_addr_t buf_busaddr;
 	uint32_t xfer_len;
 	uint32_t sense_resid;	/* Auto-Sense residual */
-	ahc_linux_scb_flags flags;
 };
 
 /*
@@ -473,39 +441,24 @@ struct scb_platform_data {
  * alignment restrictions of the various platforms supported by
  * this driver.
  */
-typedef enum {
-	AHC_RUN_CMPLT_Q_TIMER = 0x10
-} ahc_linux_softc_flags;
-
-TAILQ_HEAD(ahc_completeq, ahc_cmd);
-
 struct ahc_platform_data {
	/*
	 * Fields accessed from interrupt context.
	 */
 	struct ahc_linux_target *targets[AHC_NUM_TARGETS];
-	TAILQ_HEAD(, ahc_linux_device) device_runq;
-	struct ahc_completeq completeq;
-
 	spinlock_t spin_lock;
-	struct tasklet_struct runq_tasklet;
 	u_int qfrozen;
-	pid_t dv_pid;
-	struct timer_list completeq_timer;
 	struct timer_list reset_timer;
 	struct semaphore eh_sem;
-	struct semaphore dv_sem;
-	struct semaphore dv_cmd_sem;	/* XXX This needs to be in
-					 * the target struct
-					 */
-	struct scsi_device *dv_scsi_dev;
 	struct Scsi_Host *host;		/* pointer to scsi host */
 #define AHC_LINUX_NOIRQ ((uint32_t)~0)
 	uint32_t irq;			/* IRQ for this adapter */
 	uint32_t bios_address;
 	uint32_t mem_busaddr;		/* Mem Base Addr */
-	uint64_t hw_dma_mask;
-	ahc_linux_softc_flags flags;
+#define AHC_UP_EH_SEMAPHORE 0x1
+	uint32_t flags;
 };
 
@@ -594,7 +547,7 @@ ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
 /**************************** Initialization **********************************/
 int ahc_linux_register_host(struct ahc_softc *,
-			    Scsi_Host_Template *);
+			    struct scsi_host_template *);
 uint64_t ahc_linux_get_memsize(void);
@@ -615,17 +568,6 @@ static __inline void ahc_lockinit(struct ahc_softc *);
 static __inline void ahc_lock(struct ahc_softc *, unsigned long *flags);
 static __inline void ahc_unlock(struct ahc_softc *, unsigned long *flags);
 
-/* Lock acquisition and release of the above lock in midlayer entry points. */
-static __inline void ahc_midlayer_entrypoint_lock(struct ahc_softc *,
-						  unsigned long *flags);
-static __inline void ahc_midlayer_entrypoint_unlock(struct ahc_softc *,
-						    unsigned long *flags);
-
-/* Lock held during command compeletion to the upper layer */
-static __inline void ahc_done_lockinit(struct ahc_softc *);
-static __inline void ahc_done_lock(struct ahc_softc *, unsigned long *flags);
-static __inline void ahc_done_unlock(struct ahc_softc *, unsigned long *flags);
-
 /* Lock held during ahc_list manipulation and ahc softc frees */
 extern spinlock_t ahc_list_spinlock;
 static __inline void ahc_list_lockinit(void);
@@ -650,57 +592,6 @@ ahc_unlock(struct ahc_softc *ahc, unsigned long *flags)
 	spin_unlock_irqrestore(&ahc->platform_data->spin_lock, *flags);
 }
 
-static __inline void
-ahc_midlayer_entrypoint_lock(struct ahc_softc *ahc, unsigned long *flags)
-{
-	/*
-	 * In 2.5.X and some 2.4.X versions, the midlayer takes our
-	 * lock just before calling us, so we avoid locking again.
-	 * For other kernel versions, the io_request_lock is taken
-	 * just before our entry point is called.  In this case, we
-	 * trade the io_request_lock for our per-softc lock.
-	 */
-#if AHC_SCSI_HAS_HOST_LOCK == 0
-	spin_unlock(&io_request_lock);
-	spin_lock(&ahc->platform_data->spin_lock);
-#endif
-}
-
-static __inline void
-ahc_midlayer_entrypoint_unlock(struct ahc_softc *ahc, unsigned long *flags)
-{
-#if AHC_SCSI_HAS_HOST_LOCK == 0
-	spin_unlock(&ahc->platform_data->spin_lock);
-	spin_lock(&io_request_lock);
-#endif
-}
-
-static __inline void
-ahc_done_lockinit(struct ahc_softc *ahc)
-{
-	/*
-	 * In 2.5.X, our own lock is held during completions.
-	 * In previous versions, the io_request_lock is used.
-	 * In either case, we can't initialize this lock again.
-	 */
-}
-
-static __inline void
-ahc_done_lock(struct ahc_softc *ahc, unsigned long *flags)
-{
-#if AHC_SCSI_HAS_HOST_LOCK == 0
-	spin_lock_irqsave(&io_request_lock, *flags);
-#endif
-}
-
-static __inline void
-ahc_done_unlock(struct ahc_softc *ahc, unsigned long *flags)
-{
-#if AHC_SCSI_HAS_HOST_LOCK == 0
-	spin_unlock_irqrestore(&io_request_lock, *flags);
-#endif
-}
-
 static __inline void
 ahc_list_lockinit(void)
 {
@@ -767,12 +658,6 @@ typedef enum
 } ahc_power_state;
 
 /**************************** VL/EISA Routines ********************************/
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) \
-  && (defined(__i386__) || defined(__alpha__)) \
-  && (!defined(CONFIG_EISA)))
-#define CONFIG_EISA
-#endif
-
 #ifdef CONFIG_EISA
 extern uint32_t aic7xxx_probe_eisa_vl;
 int ahc_linux_eisa_init(void);
@@ -888,22 +773,18 @@ ahc_flush_device_writes(struct ahc_softc *ahc)
 }
 
 /**************************** Proc FS Support *********************************/
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-int ahc_linux_proc_info(char *, char **, off_t, int, int, int);
-#else
 int ahc_linux_proc_info(struct Scsi_Host *, char *, char **,
			 off_t, int, int);
-#endif
 
 /*************************** Domain Validation ********************************/
 /*********************** Transaction Access Wrappers *************************/
-static __inline void ahc_cmd_set_transaction_status(Scsi_Cmnd *, uint32_t);
+static __inline void ahc_cmd_set_transaction_status(struct scsi_cmnd *, uint32_t);
 static __inline void ahc_set_transaction_status(struct scb *, uint32_t);
-static __inline void ahc_cmd_set_scsi_status(Scsi_Cmnd *, uint32_t);
+static __inline void ahc_cmd_set_scsi_status(struct scsi_cmnd *, uint32_t);
 static __inline void ahc_set_scsi_status(struct scb *, uint32_t);
-static __inline uint32_t ahc_cmd_get_transaction_status(Scsi_Cmnd *cmd);
+static __inline uint32_t ahc_cmd_get_transaction_status(struct scsi_cmnd *cmd);
 static __inline uint32_t ahc_get_transaction_status(struct scb *);
-static __inline uint32_t ahc_cmd_get_scsi_status(Scsi_Cmnd *cmd);
+static __inline uint32_t ahc_cmd_get_scsi_status(struct scsi_cmnd *cmd);
 static __inline uint32_t ahc_get_scsi_status(struct scb *);
 static __inline void ahc_set_transaction_tag(struct scb *, int, u_int);
 static __inline u_long ahc_get_transfer_length(struct scb *);
@@ -922,7 +803,7 @@ static __inline void ahc_platform_scb_free(struct ahc_softc *ahc,
 static __inline void ahc_freeze_scb(struct scb *scb);
 
 static __inline
-void ahc_cmd_set_transaction_status(Scsi_Cmnd *cmd, uint32_t status)
+void ahc_cmd_set_transaction_status(struct scsi_cmnd *cmd, uint32_t status)
 {
 	cmd->result &= ~(CAM_STATUS_MASK << 16);
 	cmd->result |= status << 16;
@@ -935,7 +816,7 @@ void ahc_set_transaction_status(struct scb *scb, uint32_t status)
 }
 
 static __inline
-void ahc_cmd_set_scsi_status(Scsi_Cmnd *cmd, uint32_t status)
+void ahc_cmd_set_scsi_status(struct scsi_cmnd *cmd, uint32_t status)
 {
 	cmd->result &= ~0xFFFF;
 	cmd->result |= status;
@@ -948,7 +829,7 @@ void ahc_set_scsi_status(struct scb *scb, uint32_t status)
 }
 
 static __inline
-uint32_t ahc_cmd_get_transaction_status(Scsi_Cmnd *cmd)
+uint32_t ahc_cmd_get_transaction_status(struct scsi_cmnd *cmd)
 {
 	return ((cmd->result >> 16) & CAM_STATUS_MASK);
 }
@@ -960,7 +841,7 @@ uint32_t ahc_get_transaction_status(struct scb *scb)
 }
 
 static __inline
-uint32_t ahc_cmd_get_scsi_status(Scsi_Cmnd *cmd)
+uint32_t ahc_cmd_get_scsi_status(struct scsi_cmnd *cmd)
 {
 	return (cmd->result & 0xFFFF);
 }
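A quick way to read the ahc_cmd_* wrappers above: the midlayer's cmd->result word keeps the SCSI status in its low 16 bits and leaves the high 16 bits to the driver's CAM transaction status. A minimal standalone sketch of that packing (the mask value here is assumed for illustration; the real one comes from the driver's CAM headers):

    #include <stdio.h>
    #include <stdint.h>

    #define CAM_STATUS_MASK 0x3F   /* assumed field width for this sketch */

    static uint32_t set_transaction_status(uint32_t result, uint32_t status)
    {
        result &= ~(CAM_STATUS_MASK << 16);   /* clear old CAM status */
        result |= status << 16;               /* install the new one  */
        return result;
    }

    int main(void)
    {
        uint32_t result = set_transaction_status(0x0002, 0x01);
        printf("scsi=0x%x cam=0x%x\n", result & 0xFFFF,
               (result >> 16) & CAM_STATUS_MASK);
        return 0;
    }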
View File
@@ -221,13 +221,11 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	    && ahc_linux_get_memsize() > 0x80000000
 	    && pci_set_dma_mask(pdev, mask_39bit) == 0) {
 		ahc->flags |= AHC_39BIT_ADDRESSING;
-		ahc->platform_data->hw_dma_mask = mask_39bit;
 	} else {
 		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
 			printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
 			return (-ENODEV);
 		}
-		ahc->platform_data->hw_dma_mask = DMA_32BIT_MASK;
 	}
 	ahc->dev_softc = pci;
 	error = ahc_pci_config(ahc, entry);
@@ -236,15 +234,8 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return (-error);
 	}
 	pci_set_drvdata(pdev, ahc);
-	if (aic7xxx_detect_complete) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+	if (aic7xxx_detect_complete)
 		ahc_linux_register_host(ahc, &aic7xxx_driver_template);
-#else
-		printf("aic7xxx: ignoring PCI device found after "
-		       "initialization\n");
-		return (-ENODEV);
-#endif
-	}
 	return (0);
 }
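The DMA setup in the first hunk is the standard probe-time mask negotiation: try the wider mask first, keep the flag only if the platform accepts it, otherwise fall back to 32-bit or fail the probe. A condensed sketch of that flow (the helper name is illustrative; the driver does this inline):

    /* Sketch: negotiate the widest DMA mask the platform will accept. */
    static int example_negotiate_dma(struct pci_dev *pdev,
                                     struct ahc_softc *ahc,
                                     uint64_t mask_39bit)
    {
        if (pci_set_dma_mask(pdev, mask_39bit) == 0) {
            ahc->flags |= AHC_39BIT_ADDRESSING;   /* wide DMA granted */
            return 0;
        }
        if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) == 0)
            return 0;                             /* safe fallback    */
        return -ENODEV;                           /* no usable DMA    */
    }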
View File
@@ -289,13 +289,8 @@ done:
  * Return information to handle /proc support for the driver.
  */
 int
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-ahc_linux_proc_info(char *buffer, char **start, off_t offset,
-		    int length, int hostno, int inout)
-#else
 ahc_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
		     off_t offset, int length, int inout)
-#endif
 {
 	struct ahc_softc *ahc;
 	struct info_str info;
@@ -307,15 +302,7 @@ ahc_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
 	retval = -EINVAL;
 	ahc_list_lock(&s);
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-	TAILQ_FOREACH(ahc, &ahc_tailq, links) {
-		if (ahc->platform_data->host->host_no == hostno)
-			break;
-	}
-#else
 	ahc = ahc_find_softc(*(struct ahc_softc **)shost->hostdata);
-#endif
 	if (ahc == NULL)
 		goto done;
View File
@@ -35,7 +35,6 @@
 #include <linux/version.h>
 
 /* Core SCSI definitions */
-#include "scsi.h"
 #include <scsi/scsi_host.h>
 #include "aiclib.h"
 #include "cam.h"
View File
@@ -35,7 +35,7 @@
 #define SPI_PRINTK(x, l, f, a...)	dev_printk(l, &(x)->dev, f , ##a)
 
-#define SPI_NUM_ATTRS 10	/* increase this if you add attributes */
+#define SPI_NUM_ATTRS 13	/* increase this if you add attributes */
 #define SPI_OTHER_ATTRS 1	/* Increase this if you add "always
 				 * on" attributes */
 #define SPI_HOST_ATTRS	1
@@ -219,8 +219,11 @@ static int spi_setup_transport_attrs(struct device *dev)
 	struct scsi_target *starget = to_scsi_target(dev);
 
 	spi_period(starget) = -1;	/* illegal value */
+	spi_min_period(starget) = 0;
 	spi_offset(starget) = 0;	/* async */
+	spi_max_offset(starget) = 255;
 	spi_width(starget) = 0;	/* narrow */
+	spi_max_width(starget) = 1;
 	spi_iu(starget) = 0;	/* no IU */
 	spi_dt(starget) = 0;	/* ST */
 	spi_qas(starget) = 0;
@@ -235,6 +238,34 @@ static int spi_setup_transport_attrs(struct device *dev)
 	return 0;
 }
 
+#define spi_transport_show_simple(field, format_string)		\
+									\
+static ssize_t								\
+show_spi_transport_##field(struct class_device *cdev, char *buf)	\
+{									\
+	struct scsi_target *starget = transport_class_to_starget(cdev); \
+	struct spi_transport_attrs *tp;					\
+									\
+	tp = (struct spi_transport_attrs *)&starget->starget_data;	\
+	return snprintf(buf, 20, format_string, tp->field);		\
+}
+
+#define spi_transport_store_simple(field, format_string)		\
+									\
+static ssize_t								\
+store_spi_transport_##field(struct class_device *cdev, const char *buf, \
+			    size_t count)				\
+{									\
+	int val;							\
+	struct scsi_target *starget = transport_class_to_starget(cdev); \
+	struct spi_transport_attrs *tp;					\
+									\
+	tp = (struct spi_transport_attrs *)&starget->starget_data;	\
+	val = simple_strtoul(buf, NULL, 0);				\
+	tp->field = val;						\
+	return count;							\
+}
+
 #define spi_transport_show_function(field, format_string)		\
 									\
 static ssize_t								\
@@ -261,6 +292,25 @@ store_spi_transport_##field(struct class_device *cdev, const char *buf, \
 	struct spi_internal *i = to_spi_internal(shost->transportt);	\
 									\
 	val = simple_strtoul(buf, NULL, 0);				\
+	i->f->set_##field(starget, val);				\
+	return count;							\
+}
+
+#define spi_transport_store_max(field, format_string)			\
+static ssize_t								\
+store_spi_transport_##field(struct class_device *cdev, const char *buf, \
+			    size_t count)				\
+{									\
+	int val;							\
+	struct scsi_target *starget = transport_class_to_starget(cdev); \
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);	\
+	struct spi_internal *i = to_spi_internal(shost->transportt);	\
+	struct spi_transport_attrs *tp					\
+		= (struct spi_transport_attrs *)&starget->starget_data; \
+									\
+	val = simple_strtoul(buf, NULL, 0);				\
+	if (val > tp->max_##field)					\
+		val = tp->max_##field;					\
 	i->f->set_##field(starget, val);				\
 	return count;							\
 }
@@ -272,9 +322,24 @@ static CLASS_DEVICE_ATTR(field, S_IRUGO | S_IWUSR,			\
 			show_spi_transport_##field,			\
 			store_spi_transport_##field);
 
+#define spi_transport_simple_attr(field, format_string)		\
+	spi_transport_show_simple(field, format_string)			\
+	spi_transport_store_simple(field, format_string)		\
+static CLASS_DEVICE_ATTR(field, S_IRUGO | S_IWUSR,			\
+			show_spi_transport_##field,			\
+			store_spi_transport_##field);
+
+#define spi_transport_max_attr(field, format_string)			\
+	spi_transport_show_function(field, format_string)		\
+	spi_transport_store_max(field, format_string)			\
+	spi_transport_simple_attr(max_##field, format_string)		\
+static CLASS_DEVICE_ATTR(field, S_IRUGO | S_IWUSR,			\
+			show_spi_transport_##field,			\
+			store_spi_transport_##field);
+
 /* The Parallel SCSI Tranport Attributes: */
-spi_transport_rd_attr(offset, "%d\n");
-spi_transport_rd_attr(width, "%d\n");
+spi_transport_max_attr(offset, "%d\n");
+spi_transport_max_attr(width, "%d\n");
 spi_transport_rd_attr(iu, "%d\n");
 spi_transport_rd_attr(dt, "%d\n");
 spi_transport_rd_attr(qas, "%d\n");
@@ -300,26 +365,18 @@ static CLASS_DEVICE_ATTR(revalidate, S_IWUSR, NULL, store_spi_revalidate);
 /* Translate the period into ns according to the current spec
  * for SDTR/PPR messages */
-static ssize_t show_spi_transport_period(struct class_device *cdev, char *buf)
+static ssize_t
+show_spi_transport_period_helper(struct class_device *cdev, char *buf,
+				 int period)
 {
-	struct scsi_target *starget = transport_class_to_starget(cdev);
-	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
-	struct spi_transport_attrs *tp;
 	int len, picosec;
-	struct spi_internal *i = to_spi_internal(shost->transportt);
-
-	tp = (struct spi_transport_attrs *)&starget->starget_data;
-
-	if (i->f->get_period)
-		i->f->get_period(starget);
 
-	if (tp->period < 0 || tp->period > 0xff) {
+	if (period < 0 || period > 0xff) {
 		picosec = -1;
-	} else if (tp->period <= SPI_STATIC_PPR) {
-		picosec = ppr_to_ps[tp->period];
+	} else if (period <= SPI_STATIC_PPR) {
+		picosec = ppr_to_ps[period];
 	} else {
-		picosec = tp->period * 4000;
+		picosec = period * 4000;
 	}
 
 	if (picosec == -1) {
@@ -334,12 +391,9 @@ static ssize_t show_spi_transport_period(struct class_device *cdev, char *buf)
 }
 
 static ssize_t
-store_spi_transport_period(struct class_device *cdev, const char *buf,
-			   size_t count)
+store_spi_transport_period_helper(struct class_device *cdev, const char *buf,
+				  size_t count, int *periodp)
 {
-	struct scsi_target *starget = transport_class_to_starget(cdev);
-	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
-	struct spi_internal *i = to_spi_internal(shost->transportt);
 	int j, picosec, period = -1;
 	char *endp;
@@ -368,15 +422,79 @@ store_spi_transport_period(struct class_device *cdev, const char *buf,
 	if (period > 0xff)
 		period = 0xff;
 
-	i->f->set_period(starget, period);
+	*periodp = period;
 
 	return count;
 }
 
+static ssize_t
+show_spi_transport_period(struct class_device *cdev, char *buf)
+{
+	struct scsi_target *starget = transport_class_to_starget(cdev);
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+	struct spi_internal *i = to_spi_internal(shost->transportt);
+	struct spi_transport_attrs *tp =
+		(struct spi_transport_attrs *)&starget->starget_data;
+
+	if (i->f->get_period)
+		i->f->get_period(starget);
+
+	return show_spi_transport_period_helper(cdev, buf, tp->period);
+}
+
+static ssize_t
+store_spi_transport_period(struct class_device *cdev, const char *buf,
+			   size_t count)
+{
+	struct scsi_target *starget = transport_class_to_starget(cdev);
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+	struct spi_internal *i = to_spi_internal(shost->transportt);
+	struct spi_transport_attrs *tp =
+		(struct spi_transport_attrs *)&starget->starget_data;
+	int period, retval;
+
+	retval = store_spi_transport_period_helper(cdev, buf, count, &period);
+
+	if (period < tp->min_period)
+		period = tp->min_period;
+
+	i->f->set_period(starget, period);
+
+	return retval;
+}
+
 static CLASS_DEVICE_ATTR(period, S_IRUGO | S_IWUSR,
 			 show_spi_transport_period,
 			 store_spi_transport_period);
 
+static ssize_t
+show_spi_transport_min_period(struct class_device *cdev, char *buf)
+{
+	struct scsi_target *starget = transport_class_to_starget(cdev);
+	struct spi_transport_attrs *tp =
+		(struct spi_transport_attrs *)&starget->starget_data;
+
+	return show_spi_transport_period_helper(cdev, buf, tp->min_period);
+}
+
+static ssize_t
+store_spi_transport_min_period(struct class_device *cdev, const char *buf,
+			       size_t count)
+{
+	struct scsi_target *starget = transport_class_to_starget(cdev);
+	struct spi_transport_attrs *tp =
+		(struct spi_transport_attrs *)&starget->starget_data;
+
+	return store_spi_transport_period_helper(cdev, buf, count,
+						 &tp->min_period);
+}
+
+static CLASS_DEVICE_ATTR(min_period, S_IRUGO | S_IWUSR,
+			 show_spi_transport_min_period,
+			 store_spi_transport_min_period);
+
 static ssize_t show_spi_host_signalling(struct class_device *cdev, char *buf)
 {
 	struct Scsi_Host *shost = transport_class_to_shost(cdev);
@@ -642,6 +760,7 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
 {
 	struct spi_internal *i = to_spi_internal(sreq->sr_host->transportt);
 	struct scsi_device *sdev = sreq->sr_device;
+	struct scsi_target *starget = sdev->sdev_target;
 	int len = sdev->inquiry_len;
 	/* first set us up for narrow async */
 	DV_SET(offset, 0);
@@ -655,9 +774,11 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
 	}
 
 	/* test width */
-	if (i->f->set_width && sdev->wdtr) {
+	if (i->f->set_width && spi_max_width(starget) && sdev->wdtr) {
 		i->f->set_width(sdev->sdev_target, 1);
+		printk("WIDTH IS %d\n", spi_max_width(starget));
 		if (spi_dv_device_compare_inquiry(sreq, buffer,
 						  buffer + len,
 						  DV_LOOPS)
@@ -684,8 +805,8 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
 retry:
 
 	/* now set up to the maximum */
-	DV_SET(offset, 255);
-	DV_SET(period, 1);
+	DV_SET(offset, spi_max_offset(starget));
+	DV_SET(period, spi_min_period(starget));
 
 	if (len == 0) {
 		SPI_PRINTK(sdev->sdev_target, KERN_INFO, "Domain Validation skipping write tests\n");
@@ -892,6 +1013,16 @@ EXPORT_SYMBOL(spi_display_xfer_agreement);
 	if (i->f->show_##field)						\
 		count++
 
+#define SETUP_RELATED_ATTRIBUTE(field, rel_field)			\
+	i->private_attrs[count] = class_device_attr_##field;		\
+	if (!i->f->set_##rel_field) {					\
+		i->private_attrs[count].attr.mode = S_IRUGO;		\
+		i->private_attrs[count].store = NULL;			\
+	}								\
+	i->attrs[count] = &i->private_attrs[count];			\
+	if (i->f->show_##rel_field)					\
+		count++
+
 #define SETUP_HOST_ATTRIBUTE(field)					\
 	i->private_host_attrs[count] = class_device_attr_##field;	\
 	if (!i->f->set_##field) {					\
@@ -975,8 +1106,11 @@ spi_attach_transport(struct spi_function_template *ft)
 	i->f = ft;
 
 	SETUP_ATTRIBUTE(period);
+	SETUP_RELATED_ATTRIBUTE(min_period, period);
 	SETUP_ATTRIBUTE(offset);
+	SETUP_RELATED_ATTRIBUTE(max_offset, offset);
 	SETUP_ATTRIBUTE(width);
+	SETUP_RELATED_ATTRIBUTE(max_width, width);
 	SETUP_ATTRIBUTE(iu);
 	SETUP_ATTRIBUTE(dt);
 	SETUP_ATTRIBUTE(qas);
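Expanded by hand, spi_transport_store_max(offset, "%d\n") from the hunks above generates roughly the following store routine: a sysfs write that is clamped to the target's probed maximum before it reaches the LLDD's setter (hand expansion for illustration, not a verbatim copy of the preprocessor output):

    static ssize_t
    store_spi_transport_offset(struct class_device *cdev, const char *buf,
                               size_t count)
    {
        struct scsi_target *starget = transport_class_to_starget(cdev);
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        struct spi_internal *i = to_spi_internal(shost->transportt);
        struct spi_transport_attrs *tp =
                (struct spi_transport_attrs *)&starget->starget_data;
        int val = simple_strtoul(buf, NULL, 0);

        if (val > tp->max_offset)       /* clamp to the probed maximum */
                val = tp->max_offset;
        i->f->set_offset(starget, val);
        return count;
    }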
View File
@@ -682,8 +682,6 @@ static void autoconfig_16550a(struct uart_8250_port *up)
 	 * from EXCR1. Switch back to bank 0, change it in MCR. Then
 	 * switch back to bank 2, read it from EXCR1 again and check
 	 * it's changed. If so, set baud_base in EXCR2 to 921600. -- dwmw2
-	 * On PowerPC we don't want to change baud_base, as we have
-	 * a number of different divisors.  -- Tom Rini
 	 */
 	serial_outp(up, UART_LCR, 0);
 	status1 = serial_in(up, UART_MCR);
@@ -699,16 +697,25 @@ static void autoconfig_16550a(struct uart_8250_port *up)
 	serial_outp(up, UART_MCR, status1);
 
 	if ((status2 ^ status1) & UART_MCR_LOOP) {
-#ifndef CONFIG_PPC
+		unsigned short quot;
+
 		serial_outp(up, UART_LCR, 0xE0);
+
+		quot = serial_inp(up, UART_DLM) << 8;
+		quot += serial_inp(up, UART_DLL);
+		quot <<= 3;
+
 		status1 = serial_in(up, 0x04); /* EXCR1 */
 		status1 &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */
 		status1 |= 0x10;  /* 1.625 divisor for baud_base --> 921600 */
 		serial_outp(up, 0x04, status1);
-		serial_outp(up, UART_LCR, 0);
-		up->port.uartclk = 921600*16;
-#endif
+
+		serial_outp(up, UART_DLL, quot & 0xff);
+		serial_outp(up, UART_DLM, quot >> 8);
+
+		serial_outp(up, UART_LCR, 0);
+		up->port.uartclk = 921600*16;
 
 		up->port.type = PORT_NS16550A;
 		up->capabilities |= UART_NATSEMI;
 		return;
View File
@@ -61,6 +61,16 @@ struct uart_sunsab_port {
 	unsigned char		pvr_dtr_bit;	/* Which PVR bit is DTR */
 	unsigned char		pvr_dsr_bit;	/* Which PVR bit is DSR */
 	int			type;		/* SAB82532 version */
+
+	/* Setting configuration bits while the transmitter is active
+	 * can cause garbage characters to get emitted by the chip.
+	 * Therefore, we cache such writes here and do the real register
+	 * write the next time the transmitter becomes idle.
+	 */
+	unsigned int		cached_ebrg;
+	unsigned char		cached_mode;
+	unsigned char		cached_pvr;
+	unsigned char		cached_dafo;
 };
 
 /*
@@ -236,6 +246,7 @@ receive_chars(struct uart_sunsab_port *up,
 }
 
 static void sunsab_stop_tx(struct uart_port *, unsigned int);
+static void sunsab_tx_idle(struct uart_sunsab_port *);
 
 static void transmit_chars(struct uart_sunsab_port *up,
 			   union sab82532_irq_status *stat)
@@ -258,6 +269,7 @@ static void transmit_chars(struct uart_sunsab_port *up,
 		return;
 
 	set_bit(SAB82532_XPR, &up->irqflags);
+	sunsab_tx_idle(up);
 
 	if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
 		up->interrupt_mask1 |= SAB82532_IMR1_XPR;
@@ -397,21 +409,21 @@ static void sunsab_set_mctrl(struct uart_port *port, unsigned int mctrl)
 	struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
 
 	if (mctrl & TIOCM_RTS) {
-		writeb(readb(&up->regs->rw.mode) & ~SAB82532_MODE_FRTS,
-		       &up->regs->rw.mode);
-		writeb(readb(&up->regs->rw.mode) | SAB82532_MODE_RTS,
-		       &up->regs->rw.mode);
+		up->cached_mode &= ~SAB82532_MODE_FRTS;
+		up->cached_mode |= SAB82532_MODE_RTS;
 	} else {
-		writeb(readb(&up->regs->rw.mode) | SAB82532_MODE_FRTS,
-		       &up->regs->rw.mode);
-		writeb(readb(&up->regs->rw.mode) | SAB82532_MODE_RTS,
-		       &up->regs->rw.mode);
+		up->cached_mode |= (SAB82532_MODE_FRTS |
+				    SAB82532_MODE_RTS);
 	}
 	if (mctrl & TIOCM_DTR) {
-		writeb(readb(&up->regs->rw.pvr) & ~(up->pvr_dtr_bit), &up->regs->rw.pvr);
+		up->cached_pvr &= ~(up->pvr_dtr_bit);
 	} else {
-		writeb(readb(&up->regs->rw.pvr) | up->pvr_dtr_bit, &up->regs->rw.pvr);
+		up->cached_pvr |= up->pvr_dtr_bit;
 	}
+
+	set_bit(SAB82532_REGS_PENDING, &up->irqflags);
+	if (test_bit(SAB82532_XPR, &up->irqflags))
+		sunsab_tx_idle(up);
 }
 
 /* port->lock is not held. */
@@ -449,6 +461,25 @@ static void sunsab_stop_tx(struct uart_port *port, unsigned int tty_stop)
 	writeb(up->interrupt_mask1, &up->regs->w.imr1);
 }
 
+/* port->lock held by caller. */
+static void sunsab_tx_idle(struct uart_sunsab_port *up)
+{
+	if (test_bit(SAB82532_REGS_PENDING, &up->irqflags)) {
+		u8 tmp;
+
+		clear_bit(SAB82532_REGS_PENDING, &up->irqflags);
+		writeb(up->cached_mode, &up->regs->rw.mode);
+		writeb(up->cached_pvr, &up->regs->rw.pvr);
+		writeb(up->cached_dafo, &up->regs->w.dafo);
+
+		writeb(up->cached_ebrg & 0xff, &up->regs->w.bgr);
+		tmp = readb(&up->regs->rw.ccr2);
+		tmp &= ~0xc0;
+		tmp |= (up->cached_ebrg >> 2) & 0xc0;
+		writeb(tmp, &up->regs->rw.ccr2);
+	}
+}
+
 /* port->lock held by caller. */
 static void sunsab_start_tx(struct uart_port *port, unsigned int tty_start)
 {
@@ -517,12 +548,16 @@ static void sunsab_break_ctl(struct uart_port *port, int break_state)
 
 	spin_lock_irqsave(&up->port.lock, flags);
 
-	val = readb(&up->regs->rw.dafo);
+	val = up->cached_dafo;
 	if (break_state)
 		val |= SAB82532_DAFO_XBRK;
 	else
 		val &= ~SAB82532_DAFO_XBRK;
-	writeb(val, &up->regs->rw.dafo);
+	up->cached_dafo = val;
+
+	set_bit(SAB82532_REGS_PENDING, &up->irqflags);
+	if (test_bit(SAB82532_XPR, &up->irqflags))
+		sunsab_tx_idle(up);
 
 	spin_unlock_irqrestore(&up->port.lock, flags);
 }
@@ -566,8 +601,9 @@ static int sunsab_startup(struct uart_port *port)
 	       SAB82532_CCR2_TOE, &up->regs->w.ccr2);
 	writeb(0, &up->regs->w.ccr3);
 	writeb(SAB82532_CCR4_MCK4 | SAB82532_CCR4_EBRG, &up->regs->w.ccr4);
-	writeb(SAB82532_MODE_RTS | SAB82532_MODE_FCTS |
-	       SAB82532_MODE_RAC, &up->regs->w.mode);
+	up->cached_mode = (SAB82532_MODE_RTS | SAB82532_MODE_FCTS |
+			   SAB82532_MODE_RAC);
+	writeb(up->cached_mode, &up->regs->w.mode);
 	writeb(SAB82532_RFC_DPS|SAB82532_RFC_RFTH_32, &up->regs->w.rfc);
 
 	tmp = readb(&up->regs->rw.ccr0);
@@ -598,7 +634,6 @@ static void sunsab_shutdown(struct uart_port *port)
 {
 	struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
 	unsigned long flags;
-	unsigned char tmp;
 
 	spin_lock_irqsave(&up->port.lock, flags);
@@ -609,14 +644,13 @@ static void sunsab_shutdown(struct uart_port *port)
 	writeb(up->interrupt_mask1, &up->regs->w.imr1);
 
 	/* Disable break condition */
-	tmp = readb(&up->regs->rw.dafo);
-	tmp &= ~SAB82532_DAFO_XBRK;
-	writeb(tmp, &up->regs->rw.dafo);
+	up->cached_dafo = readb(&up->regs->rw.dafo);
+	up->cached_dafo &= ~SAB82532_DAFO_XBRK;
+	writeb(up->cached_dafo, &up->regs->rw.dafo);
 
 	/* Disable Receiver */
-	tmp = readb(&up->regs->rw.mode);
-	tmp &= ~SAB82532_MODE_RAC;
-	writeb(tmp, &up->regs->rw.mode);
+	up->cached_mode &= ~SAB82532_MODE_RAC;
+	writeb(up->cached_mode, &up->regs->rw.mode);
 
 	/*
 	 * XXX FIXME
@@ -685,7 +719,6 @@ static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cfla
 			     unsigned int iflag, unsigned int baud,
 			     unsigned int quot)
 {
-	unsigned int ebrg;
 	unsigned char dafo;
 	int bits, n, m;
@@ -714,10 +747,11 @@ static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cfla
 	} else {
 		dafo |= SAB82532_DAFO_PAR_EVEN;
 	}
+	up->cached_dafo = dafo;
 
 	calc_ebrg(baud, &n, &m);
 
-	ebrg = n | (m << 6);
+	up->cached_ebrg = n | (m << 6);
 
 	up->tec_timeout = (10 * 1000000) / baud;
 	up->cec_timeout = up->tec_timeout >> 2;
@@ -770,16 +804,13 @@ static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cfla
 	uart_update_timeout(&up->port, cflag,
 			    (up->port.uartclk / (16 * quot)));
 
-	/* Now bang the new settings into the chip. */
-	sunsab_cec_wait(up);
-	sunsab_tec_wait(up);
-	writeb(dafo, &up->regs->w.dafo);
-	writeb(ebrg & 0xff, &up->regs->w.bgr);
-	writeb((readb(&up->regs->rw.ccr2) & ~0xc0) | ((ebrg >> 2) & 0xc0),
-	       &up->regs->rw.ccr2);
-	writeb(readb(&up->regs->rw.mode) | SAB82532_MODE_RAC, &up->regs->rw.mode);
+	/* Now schedule a register update when the chip's
+	 * transmitter is idle.
+	 */
+	up->cached_mode |= SAB82532_MODE_RAC;
+	set_bit(SAB82532_REGS_PENDING, &up->irqflags);
+	if (test_bit(SAB82532_XPR, &up->irqflags))
+		sunsab_tx_idle(up);
 }
 
 /* port->lock is not held. */
@@ -1084,11 +1115,13 @@ static void __init sunsab_init_hw(void)
 		up->pvr_dsr_bit = (1 << 3);
 		up->pvr_dtr_bit = (1 << 2);
 	}
-	writeb((1 << 1) | (1 << 2) | (1 << 4), &up->regs->w.pvr);
-	writeb(readb(&up->regs->rw.mode) | SAB82532_MODE_FRTS,
-	       &up->regs->rw.mode);
-	writeb(readb(&up->regs->rw.mode) | SAB82532_MODE_RTS,
-	       &up->regs->rw.mode);
+	up->cached_pvr = (1 << 1) | (1 << 2) | (1 << 4);
+	writeb(up->cached_pvr, &up->regs->w.pvr);
+	up->cached_mode = readb(&up->regs->rw.mode);
+	up->cached_mode |= SAB82532_MODE_FRTS;
+	writeb(up->cached_mode, &up->regs->rw.mode);
+	up->cached_mode |= SAB82532_MODE_RTS;
+	writeb(up->cached_mode, &up->regs->rw.mode);
 
 	up->tec_timeout = SAB82532_MAX_TEC_TIMEOUT;
 	up->cec_timeout = SAB82532_MAX_CEC_TIMEOUT;
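All of these hunks implement one idiom: nothing touches the SAB82532's configuration registers while the transmitter may be busy. Writers stage changes into the up->cached_* fields and raise SAB82532_REGS_PENDING; the single flush point, sunsab_tx_idle(), runs when XPR (transmit FIFO empty) has been observed. The writer side reduces to this skeleton (condensed from the hunks above, not new driver code):

    /* Writer: stage the change; flush immediately only if already idle. */
    up->cached_dafo = val;                         /* staged, not written  */
    set_bit(SAB82532_REGS_PENDING, &up->irqflags);
    if (test_bit(SAB82532_XPR, &up->irqflags))
            sunsab_tx_idle(up);                    /* TX idle: flush now   */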
View File
@@ -126,6 +126,7 @@ union sab82532_irq_status {
 /* irqflags bits */
 #define SAB82532_ALLS		0x00000001
 #define SAB82532_XPR		0x00000002
+#define SAB82532_REGS_PENDING	0x00000004
 
 /* RFIFO Status Byte */
 #define SAB82532_RSTAT_PE	0x80
View File
@@ -1580,6 +1580,7 @@ enoent:
 fail:
 	return dentry;
 }
+EXPORT_SYMBOL_GPL(lookup_create);
 
 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
 {
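Exporting lookup_create() lets modules create a filesystem object under a parent resolved with LOOKUP_PARENT without open-coding the last-component checks and parent locking; the af_unix hunk later in this section is exactly this caller shape. A condensed sketch of the pattern (error handling abbreviated):

    err = path_lookup(name, LOOKUP_PARENT, &nd);
    if (err)
            return err;
    dentry = lookup_create(&nd, 0);     /* locks parent i_sem, returns
                                         * the negative dentry          */
    if (!IS_ERR(dentry)) {
            err = vfs_mknod(nd.dentry->d_inode, dentry, mode, dev);
            dput(dentry);
    } else
            err = PTR_ERR(dentry);
    up(&nd.dentry->d_inode->i_sem);     /* caller still unlocks parent  */
    path_release(&nd);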
View File
@@ -230,7 +230,6 @@ const struct reiserfs_key MAX_KEY = {
	 __constant_cpu_to_le32(0xffffffff)},}
 };
 
-const struct in_core_key MAX_IN_CORE_KEY = {~0U, ~0U, ~0ULL>>4, 15};
-
 /* Get delimiting key of the buffer by looking for it in the buffers in the path, starting from the bottom
    of the path, and going upwards.  We must check the path's validity at each step.  If the key is not in
View File
@@ -164,7 +164,9 @@ static int finish_unfinished (struct super_block * s)
 
     /* compose key to look for "save" links */
     max_cpu_key.version = KEY_FORMAT_3_5;
-    max_cpu_key.on_disk_key = MAX_IN_CORE_KEY;
+    max_cpu_key.on_disk_key.k_dir_id = ~0U;
+    max_cpu_key.on_disk_key.k_objectid = ~0U;
+    set_cpu_key_k_offset (&max_cpu_key, ~0U);
     max_cpu_key.key_length = 3;
 
 #ifdef CONFIG_QUOTA
View File
@@ -5,7 +5,7 @@
 #ifndef __UM_ELF_I386_H
 #define __UM_ELF_I386_H
 
-#include "user.h"
+#include <asm/user.h>
 
 #define R_386_NONE	0
 #define R_386_32	1
View File
@@ -8,6 +8,27 @@
 
 #include <asm/user.h>
 
+/* x86-64 relocation types, taken from asm-x86_64/elf.h */
+#define R_X86_64_NONE		0	/* No reloc */
+#define R_X86_64_64		1	/* Direct 64 bit */
+#define R_X86_64_PC32		2	/* PC relative 32 bit signed */
+#define R_X86_64_GOT32		3	/* 32 bit GOT entry */
+#define R_X86_64_PLT32		4	/* 32 bit PLT address */
+#define R_X86_64_COPY		5	/* Copy symbol at runtime */
+#define R_X86_64_GLOB_DAT	6	/* Create GOT entry */
+#define R_X86_64_JUMP_SLOT	7	/* Create PLT entry */
+#define R_X86_64_RELATIVE	8	/* Adjust by program base */
+#define R_X86_64_GOTPCREL	9	/* 32 bit signed pc relative
+					   offset to GOT */
+#define R_X86_64_32		10	/* Direct 32 bit zero extended */
+#define R_X86_64_32S		11	/* Direct 32 bit sign extended */
+#define R_X86_64_16		12	/* Direct 16 bit zero extended */
+#define R_X86_64_PC16		13	/* 16 bit sign extended pc relative */
+#define R_X86_64_8		14	/* Direct 8 bit sign extended */
+#define R_X86_64_PC8		15	/* 8 bit sign extended pc relative */
+#define R_X86_64_NUM		16
+
 typedef unsigned long elf_greg_t;
 
 #define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
@@ -44,7 +65,8 @@ typedef struct { } elf_fpregset_t;
 } while (0)
 
 #ifdef TIF_IA32 /* XXX */
-	clear_thread_flag(TIF_IA32); \
+#error XXX, indeed
+	clear_thread_flag(TIF_IA32);
 #endif
 
 #define USE_ELF_CORE_DUMP
View File
@@ -13,6 +13,8 @@
  * This should be a per-architecture thing, to allow different
  * error and pointer decisions.
  */
+#define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L)
+
 static inline void *ERR_PTR(long error)
 {
 	return (void *) error;
@@ -25,7 +27,7 @@ static inline long PTR_ERR(const void *ptr)
 
 static inline long IS_ERR(const void *ptr)
 {
-	return unlikely((unsigned long)ptr > (unsigned long)-1000L);
+	return IS_ERR_VALUE((unsigned long)ptr);
 }
 
 #endif /* _LINUX_ERR_H */
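Factoring the test out as IS_ERR_VALUE() lets callers that return plain unsigned long addresses (rather than pointers) use the same convention: the top 1000 bytes of the address space double as an errno band. A minimal standalone illustration of the idiom, with a stand-in for the kernel's branch hint:

    #include <stdio.h>

    #define unlikely(x) (x)     /* stand-in for the kernel's hint */
    #define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L)

    int main(void)
    {
        unsigned long ok  = 0x1000;
        unsigned long err = (unsigned long)-12;   /* -ENOMEM encoded */

        printf("%d %d\n", !!IS_ERR_VALUE(ok), !!IS_ERR_VALUE(err));
        return 0;   /* prints: 0 1 */
    }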
View File
@@ -195,6 +195,33 @@ struct _mmc_csd {
 #define MMC_VDD_35_36	0x00800000	/* VDD voltage 3.5 ~ 3.6 */
 #define MMC_CARD_BUSY	0x80000000	/* Card Power up status bit */
 
+/*
+ * Card Command Classes (CCC)
+ */
+#define CCC_BASIC		(1<<0)	/* (0) Basic protocol functions */
+					/* (CMD0,1,2,3,4,7,9,10,12,13,15) */
+#define CCC_STREAM_READ		(1<<1)	/* (1) Stream read commands */
+					/* (CMD11) */
+#define CCC_BLOCK_READ		(1<<2)	/* (2) Block read commands */
+					/* (CMD16,17,18) */
+#define CCC_STREAM_WRITE	(1<<3)	/* (3) Stream write commands */
+					/* (CMD20) */
+#define CCC_BLOCK_WRITE		(1<<4)	/* (4) Block write commands */
+					/* (CMD16,24,25,26,27) */
+#define CCC_ERASE		(1<<5)	/* (5) Ability to erase blocks */
+					/* (CMD32,33,34,35,36,37,38,39) */
+#define CCC_WRITE_PROT		(1<<6)	/* (6) Able to write protect blocks */
+					/* (CMD28,29,30) */
+#define CCC_LOCK_CARD		(1<<7)	/* (7) Able to lock down card */
+					/* (CMD16,CMD42) */
+#define CCC_APP_SPEC		(1<<8)	/* (8) Application specific */
+					/* (CMD55,56,57,ACMD*) */
+#define CCC_IO_MODE		(1<<9)	/* (9) I/O mode */
+					/* (CMD5,39,40,52,53) */
+#define CCC_SWITCH		(1<<10)	/* (10) High speed switch */
+					/* (CMD6,34,35,36,37,50) */
+					/* (11) Reserved */
+					/* (CMD?) */
+
 /*
  * CSD field definitions
View File
#define _spin_trylock_bh(lock) ({preempt_disable(); local_bh_disable(); \ #define _spin_trylock_bh(lock) ({preempt_disable(); local_bh_disable(); \
_raw_spin_trylock(lock) ? \ _raw_spin_trylock(lock) ? \
1 : ({preempt_enable(); local_bh_enable(); 0;});}) 1 : ({preempt_enable_no_resched(); local_bh_enable(); 0;});})
#define _spin_lock(lock) \ #define _spin_lock(lock) \
do { \ do { \
@ -383,7 +383,7 @@ do { \
#define _spin_unlock_bh(lock) \ #define _spin_unlock_bh(lock) \
do { \ do { \
_raw_spin_unlock(lock); \ _raw_spin_unlock(lock); \
preempt_enable(); \ preempt_enable_no_resched(); \
local_bh_enable(); \ local_bh_enable(); \
__release(lock); \ __release(lock); \
} while (0) } while (0)
@ -391,7 +391,7 @@ do { \
#define _write_unlock_bh(lock) \ #define _write_unlock_bh(lock) \
do { \ do { \
_raw_write_unlock(lock); \ _raw_write_unlock(lock); \
preempt_enable(); \ preempt_enable_no_resched(); \
local_bh_enable(); \ local_bh_enable(); \
__release(lock); \ __release(lock); \
} while (0) } while (0)
@ -423,8 +423,8 @@ do { \
#define _read_unlock_bh(lock) \ #define _read_unlock_bh(lock) \
do { \ do { \
_raw_read_unlock(lock); \ _raw_read_unlock(lock); \
preempt_enable_no_resched(); \
local_bh_enable(); \ local_bh_enable(); \
preempt_enable(); \
__release(lock); \ __release(lock); \
} while (0) } while (0)
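The point of switching to preempt_enable_no_resched() in the *_bh unlock paths: with bottom halves still disabled the preemption count is nonzero anyway, so the resched test in a full preempt_enable() can never fire there and is pure overhead; local_bh_enable() performs the real check once the softirq count drops. The resulting unlock sequence, simplified (the kernel spells it via the macros above):

    /* Shape of the _bh unlock path after this change: */
    _raw_spin_unlock(lock);
    preempt_enable_no_resched();  /* drop count; skip the futile test */
    local_bh_enable();            /* BHs back on; this does the check */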
View File
@@ -41,6 +41,7 @@ extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
 extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
					unsigned long start, unsigned long end);
 extern struct vm_struct *remove_vm_area(void *addr);
+extern struct vm_struct *__remove_vm_area(void *addr);
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
			struct page ***pages);
 extern void unmap_vm_area(struct vm_struct *area);
View File
@@ -2,8 +2,8 @@
  * include/net/act_generic.h
  *
 */
-#ifndef ACT_GENERIC_H
-#define ACT_GENERIC_H
+#ifndef _NET_ACT_GENERIC_H
+#define _NET_ACT_GENERIC_H
 
 static inline int tcf_defact_release(struct tcf_defact *p, int bind)
 {
 	int ret = 0;
View File
@@ -27,8 +27,11 @@ struct scsi_transport_template;
 
 struct spi_transport_attrs {
 	int period;		/* value in the PPR/SDTR command */
+	int min_period;
 	int offset;
+	int max_offset;
 	unsigned int width:1;	/* 0 - narrow, 1 - wide */
+	unsigned int max_width:1;
 	unsigned int iu:1;	/* Information Units enabled */
 	unsigned int dt:1;	/* DT clocking enabled */
 	unsigned int qas:1;	/* Quick Arbitration and Selection enabled */
@@ -63,8 +66,11 @@ struct spi_host_attrs {
 
 /* accessor functions */
 #define spi_period(x)	(((struct spi_transport_attrs *)&(x)->starget_data)->period)
+#define spi_min_period(x) (((struct spi_transport_attrs *)&(x)->starget_data)->min_period)
 #define spi_offset(x)	(((struct spi_transport_attrs *)&(x)->starget_data)->offset)
+#define spi_max_offset(x) (((struct spi_transport_attrs *)&(x)->starget_data)->max_offset)
 #define spi_width(x)	(((struct spi_transport_attrs *)&(x)->starget_data)->width)
+#define spi_max_width(x) (((struct spi_transport_attrs *)&(x)->starget_data)->max_width)
 #define spi_iu(x)	(((struct spi_transport_attrs *)&(x)->starget_data)->iu)
 #define spi_dt(x)	(((struct spi_transport_attrs *)&(x)->starget_data)->dt)
 #define spi_qas(x)	(((struct spi_transport_attrs *)&(x)->starget_data)->qas)
View File
@@ -4243,7 +4243,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
 
		/* No more Mr. Nice Guy. */
		if (dest_cpu == NR_CPUS) {
-			tsk->cpus_allowed = cpuset_cpus_allowed(tsk);
+			cpus_setall(tsk->cpus_allowed);
			dest_cpu = any_online_cpu(tsk->cpus_allowed);
 
			/*
View File
@@ -294,7 +294,7 @@ EXPORT_SYMBOL(_spin_unlock_irq);
 void __lockfunc _spin_unlock_bh(spinlock_t *lock)
 {
 	_raw_spin_unlock(lock);
-	preempt_enable();
+	preempt_enable_no_resched();
 	local_bh_enable();
 }
 EXPORT_SYMBOL(_spin_unlock_bh);
@@ -318,7 +318,7 @@ EXPORT_SYMBOL(_read_unlock_irq);
 void __lockfunc _read_unlock_bh(rwlock_t *lock)
 {
 	_raw_read_unlock(lock);
-	preempt_enable();
+	preempt_enable_no_resched();
 	local_bh_enable();
 }
 EXPORT_SYMBOL(_read_unlock_bh);
@@ -342,7 +342,7 @@ EXPORT_SYMBOL(_write_unlock_irq);
 void __lockfunc _write_unlock_bh(rwlock_t *lock)
 {
 	_raw_write_unlock(lock);
-	preempt_enable();
+	preempt_enable_no_resched();
 	local_bh_enable();
 }
 EXPORT_SYMBOL(_write_unlock_bh);
@@ -354,7 +354,7 @@ int __lockfunc _spin_trylock_bh(spinlock_t *lock)
 	if (_raw_spin_trylock(lock))
 		return 1;
 
-	preempt_enable();
+	preempt_enable_no_resched();
 	local_bh_enable();
 	return 0;
 }
View File
@@ -1004,7 +1004,7 @@ __generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 		if (pos < size) {
 			retval = generic_file_direct_IO(READ, iocb,
						iov, pos, nr_segs);
-			if (retval >= 0 && !is_sync_kiocb(iocb))
+			if (retval > 0 && !is_sync_kiocb(iocb))
 				retval = -EIOCBQUEUED;
 			if (retval > 0)
 				*ppos = pos + retval;
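The one-character change matters for the zero-length case: a direct-I/O read that returns 0 (for example, a read starting at or beyond end of file) used to be rewritten to -EIOCBQUEUED, promising an AIO completion that would never arrive. Only a read that actually has bytes in flight may be queued:

    retval = generic_file_direct_IO(READ, iocb, iov, pos, nr_segs);
    if (retval > 0 && !is_sync_kiocb(iocb))
            retval = -EIOCBQUEUED;   /* async and genuinely queued */
    /* retval == 0 now completes synchronously, as it must */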
View File
@@ -1302,37 +1302,40 @@ unsigned long
 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
 {
-	if (flags & MAP_FIXED) {
-		unsigned long ret;
+	unsigned long ret;
 
-		if (addr > TASK_SIZE - len)
-			return -ENOMEM;
-		if (addr & ~PAGE_MASK)
-			return -EINVAL;
-		if (file && is_file_hugepages(file)) {
-			/*
-			 * Check if the given range is hugepage aligned, and
-			 * can be made suitable for hugepages.
-			 */
-			ret = prepare_hugepage_range(addr, len);
-		} else {
-			/*
-			 * Ensure that a normal request is not falling in a
-			 * reserved hugepage range.  For some archs like IA-64,
-			 * there is a separate region for hugepages.
-			 */
-			ret = is_hugepage_only_range(current->mm, addr, len);
-		}
-		if (ret)
-			return -EINVAL;
-		return addr;
+	if (!(flags & MAP_FIXED)) {
+		unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+
+		get_area = current->mm->get_unmapped_area;
+		if (file && file->f_op && file->f_op->get_unmapped_area)
+			get_area = file->f_op->get_unmapped_area;
+		addr = get_area(file, addr, len, pgoff, flags);
+		if (IS_ERR_VALUE(addr))
+			return addr;
 	}
 
-	if (file && file->f_op && file->f_op->get_unmapped_area)
-		return file->f_op->get_unmapped_area(file, addr, len,
-						pgoff, flags);
-
-	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+	if (addr > TASK_SIZE - len)
+		return -ENOMEM;
+	if (addr & ~PAGE_MASK)
+		return -EINVAL;
+	if (file && is_file_hugepages(file)) {
+		/*
+		 * Check if the given range is hugepage aligned, and
+		 * can be made suitable for hugepages.
+		 */
+		ret = prepare_hugepage_range(addr, len);
+	} else {
+		/*
+		 * Ensure that a normal request is not falling in a
+		 * reserved hugepage range.  For some archs like IA-64,
+		 * there is a separate region for hugepages.
+		 */
+		ret = is_hugepage_only_range(current->mm, addr, len);
+	}
+	if (ret)
+		return -EINVAL;
+	return addr;
 }
 
 EXPORT_SYMBOL(get_unmapped_area);
View File
@@ -248,6 +248,28 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
 }
 
+/* Caller must hold vmlist_lock */
+struct vm_struct *__remove_vm_area(void *addr)
+{
+	struct vm_struct **p, *tmp;
+
+	for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
+		 if (tmp->addr == addr)
+			 goto found;
+	}
+	return NULL;
+
+found:
+	unmap_vm_area(tmp);
+	*p = tmp->next;
+
+	/*
+	 * Remove the guard page.
+	 */
+	tmp->size -= PAGE_SIZE;
+	return tmp;
+}
+
 /**
  *	remove_vm_area  -  find and remove a contingous kernel virtual area
  *
@@ -255,30 +277,15 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
  *
  *	Search for the kernel VM area starting at @addr, and remove it.
  *	This function returns the found VM area, but using it is NOT safe
- *	on SMP machines.
+ *	on SMP machines, except for its size or flags.
  */
 struct vm_struct *remove_vm_area(void *addr)
 {
-	struct vm_struct **p, *tmp;
+	struct vm_struct *v;
 
 	write_lock(&vmlist_lock);
-	for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
-		 if (tmp->addr == addr)
-			 goto found;
-	}
+	v = __remove_vm_area(addr);
 	write_unlock(&vmlist_lock);
-	return NULL;
-
-found:
-	unmap_vm_area(tmp);
-	*p = tmp->next;
-	write_unlock(&vmlist_lock);
-
-	/*
-	 * Remove the guard page.
-	 */
-	tmp->size -= PAGE_SIZE;
-	return tmp;
+	return v;
 }
 
 void __vunmap(void *addr, int deallocate_pages)
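The split follows a common kernel idiom: the double-underscore variant does the work and documents its locking contract in a comment, while the public wrapper supplies the locking. A caller that needs to combine the removal with other vmlist work can now hold the lock across both. Sketch of such a hypothetical caller:

    struct vm_struct *area;

    write_lock(&vmlist_lock);
    /* ... other vmlist inspection under the same critical section ... */
    area = __remove_vm_area(addr);   /* contract: vmlist_lock held */
    write_unlock(&vmlist_lock);
    if (!area)
            printk(KERN_ERR "no vm area found at %p\n", addr);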
View File
@@ -490,6 +490,14 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;
+
+			BUG_ON(frag->sk);
+			if (skb->sk) {
+				sock_hold(skb->sk);
+				frag->sk = skb->sk;
+				frag->destructor = sock_wfree;
+				skb->truesize -= frag->truesize;
+			}
		}
 
		/* Everything is OK. Generate! */
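The new block moves write-memory accounting from the head skb onto each fragment: every fragment takes its own socket reference and uncharges itself through sock_wfree() when freed, instead of the head carrying the whole charge. In shape (condensed from the hunk; the ipv6 counterpart below is identical):

    if (skb->sk) {
            sock_hold(skb->sk);               /* one socket ref per frag  */
            frag->sk = skb->sk;
            frag->destructor = sock_wfree;    /* uncharges wmem on free   */
            skb->truesize -= frag->truesize;  /* head drops that share    */
    }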
View File
@@ -508,7 +508,6 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		rc = NF_ACCEPT;
		/* do not touch skb anymore */
		atomic_inc(&cp->in_pkts);
-		__ip_vs_conn_put(cp);
		goto out;
	}
View File
@@ -940,37 +940,25 @@ void ip_ct_refresh_acct(struct ip_conntrack *ct,
 struct sk_buff *
 ip_ct_gather_frags(struct sk_buff *skb, u_int32_t user)
 {
-	struct sock *sk = skb->sk;
 #ifdef CONFIG_NETFILTER_DEBUG
 	unsigned int olddebug = skb->nf_debug;
 #endif
 
-	if (sk) {
-		sock_hold(sk);
-		skb_orphan(skb);
-	}
+	skb_orphan(skb);
 
 	local_bh_disable();
 	skb = ip_defrag(skb, user);
 	local_bh_enable();
 
-	if (!skb) {
-		if (sk)
-			sock_put(sk);
-		return skb;
-	}
-
-	if (sk) {
-		skb_set_owner_w(skb, sk);
-		sock_put(sk);
-	}
-
-	ip_send_check(skb->nh.iph);
-	skb->nfcache |= NFC_ALTERED;
+	if (skb) {
+		ip_send_check(skb->nh.iph);
+		skb->nfcache |= NFC_ALTERED;
 #ifdef CONFIG_NETFILTER_DEBUG
-	/* Packet path as if nothing had happened. */
-	skb->nf_debug = olddebug;
+		/* Packet path as if nothing had happened. */
+		skb->nf_debug = olddebug;
 #endif
+	}
 
 	return skb;
 }
View File
@@ -552,13 +552,17 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
			    skb_headroom(frag) < hlen)
				goto slow_path;
 
-			/* Correct socket ownership. */
-			if (frag->sk == NULL)
-				goto slow_path;
-
			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;
+
+			BUG_ON(frag->sk);
+			if (skb->sk) {
+				sock_hold(skb->sk);
+				frag->sk = skb->sk;
+				frag->destructor = sock_wfree;
+				skb->truesize -= frag->truesize;
+			}
		}
 
		err = 0;
@@ -1116,12 +1120,10 @@ int ip6_push_pending_frames(struct sock *sk)
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
-#if 0 /* Logically correct, but useless work, ip_fragment() will have to undo */
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
-#endif
	}
 
	ipv6_addr_copy(final_dst, &fl->fl6_dst);
View File
@@ -735,11 +735,15 @@ static inline int do_one_broadcast(struct sock *sk,
 	sock_hold(sk);
 
 	if (p->skb2 == NULL) {
-		if (atomic_read(&p->skb->users) != 1) {
+		if (skb_shared(p->skb)) {
 			p->skb2 = skb_clone(p->skb, p->allocation);
 		} else {
-			p->skb2 = p->skb;
-			atomic_inc(&p->skb->users);
+			p->skb2 = skb_get(p->skb);
+			/*
+			 * skb ownership may have been set when
+			 * delivered to a previous socket.
+			 */
+			skb_orphan(p->skb2);
 		}
 	}
 	if (p->skb2 == NULL) {
@@ -785,11 +789,12 @@ int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
 	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
 		do_one_broadcast(sk, &info);
 
+	kfree_skb(skb);
+
 	netlink_unlock_table();
 
 	if (info.skb2)
 		kfree_skb(info.skb2);
-	kfree_skb(skb);
 
 	if (info.delivered) {
 		if (info.congested && (allocation & __GFP_WAIT))
View File
@@ -770,33 +770,12 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 		err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
 		if (err)
 			goto out_mknod_parent;
-		/*
-		 * Yucky last component or no last component at all?
-		 * (foo/., foo/.., /////)
-		 */
-		err = -EEXIST;
-		if (nd.last_type != LAST_NORM)
-			goto out_mknod;
-		/*
-		 * Lock the directory.
-		 */
-		down(&nd.dentry->d_inode->i_sem);
-		/*
-		 * Do the final lookup.
-		 */
-		dentry = lookup_hash(&nd.last, nd.dentry);
+
+		dentry = lookup_create(&nd, 0);
 		err = PTR_ERR(dentry);
 		if (IS_ERR(dentry))
 			goto out_mknod_unlock;
-		err = -ENOENT;
-		/*
-		 * Special case - lookup gave negative, but... we had foo/bar/
-		 * From the vfs_mknod() POV we just have a negative dentry -
-		 * all is fine. Let's be bastards - you had / on the end, you've
-		 * been asking for (non-existent) directory. -ENOENT for you.
-		 */
-		if (nd.last.name[nd.last.len] && !dentry->d_inode)
-			goto out_mknod_dput;
+
 		/*
 		 * All right, let's create it.
 		 */
@@ -845,7 +824,6 @@ out_mknod_dput:
 	dput(dentry);
 out_mknod_unlock:
 	up(&nd.dentry->d_inode->i_sem);
-out_mknod:
 	path_release(&nd);
 out_mknod_parent:
 	if (err==-EEXIST)
View File
@@ -698,7 +698,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
				return -ENOMEM;
 
			if (skb1->sk)
-				skb_set_owner_w(skb, skb1->sk);
+				skb_set_owner_w(skb2, skb1->sk);
 
			/* Looking around. Are we still alive?
			 * OK, link new skb, drop old one */
View File
@@ -34,14 +34,21 @@ static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type)
 {
 	struct rtattr *rt = xfrma[type - 1];
 	struct xfrm_algo *algp;
+	int len;
 
 	if (!rt)
 		return 0;
 
-	if ((rt->rta_len - sizeof(*rt)) < sizeof(*algp))
+	len = (rt->rta_len - sizeof(*rt)) - sizeof(*algp);
+	if (len < 0)
 		return -EINVAL;
 
 	algp = RTA_DATA(rt);
+
+	len -= (algp->alg_key_len + 7U) / 8;
+	if (len < 0)
+		return -EINVAL;
+
 	switch (type) {
 	case XFRMA_ALG_AUTH:
 		if (!algp->alg_key_len &&
@@ -162,6 +169,7 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
 	struct rtattr *rta = u_arg;
 	struct xfrm_algo *p, *ualg;
 	struct xfrm_algo_desc *algo;
+	int len;
 
 	if (!rta)
 		return 0;
@@ -173,11 +181,12 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
 		return -ENOSYS;
 	*props = algo->desc.sadb_alg_id;
 
-	p = kmalloc(sizeof(*ualg) + ualg->alg_key_len, GFP_KERNEL);
+	len = sizeof(*ualg) + (ualg->alg_key_len + 7U) / 8;
+	p = kmalloc(len, GFP_KERNEL);
 	if (!p)
 		return -ENOMEM;
 
-	memcpy(p, ualg, sizeof(*ualg) + ualg->alg_key_len);
+	memcpy(p, ualg, len);
 	*algpp = p;
 	return 0;
 }
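The arithmetic both hunks share: alg_key_len counts bits, while the netlink attribute payload is sized in bytes, so the round-up (bits + 7) / 8 is what must be compared against (and copied from) the attribute. The old code compared and copied raw bit counts, allowing a short payload to pass validation. A standalone illustration of the check:

    #include <stdio.h>

    /* Does a payload of payload_bytes hold a key of key_bits bits? */
    static int key_fits(int payload_bytes, int key_bits)
    {
        int key_bytes = (key_bits + 7) / 8;   /* round bits up to bytes */
        return payload_bytes >= key_bytes;
    }

    int main(void)
    {
        printf("%d\n", key_fits(20, 160));    /* 160-bit key: fits (1)   */
        printf("%d\n", key_fits(19, 160));    /* one byte short: no (0)  */
        return 0;
    }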