Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc: (23 commits)
  [POWERPC] Remove leftover printk in isa-bridge.c
  [POWERPC] Remove duplicate #include
  [POWERPC] Initialize lockdep earlier
  [POWERPC] Document when printk is useable
  [POWERPC] Fix bogus paca->_current initialization
  [POWERPC] Fix of_i2c include for module compilation
  [POWERPC] Make default cputable entries reflect selected CPU family
  [POWERPC] spufs: lockdep annotations for spufs_dir_close
  [POWERPC] spufs: don't requeue victim contex in find_victim if it's not in spu_run
  [POWERPC] 4xx: Fix PCI mem in sequoia DTS
  [POWERPC] 4xx: Add endpoint support to 4xx PCIe driver
  [POWERPC] 4xx: Fix problem with new TLB storage attibute fields on 440x6 core
  [POWERPC] spufs: spu_create should send inotify IM_CREATE event
  [POWERPC] spufs: handle faults while the context switch pending flag is set
  [POWERPC] spufs: fix concurrent delivery of class 0 & 1 exceptions
  [POWERPC] spufs: try to route SPU interrupts to local node
  [POWERPC] spufs: set SPU_CONTEXT_SWITCH_PENDING before synchronising SPU irqs
  [POWERPC] spufs: don't acquire state_mutex interruptible while performing callback
  [POWERPC] spufs: update master runcntl with context lock held
  [POWERPC] spufs: fix post-stopped update of MFC_CNTL register
  ...
commit d9a9a23ff2
@@ -342,9 +342,14 @@
 		/* Outbound ranges, one memory and one IO,
 		 * later cannot be changed. Chip supports a second
 		 * IO range but we don't use it for now
+		 * From the 440EPx user manual:
+		 * PCI 1 Memory 1 8000 0000 1 BFFF FFFF 1GB
+		 * I/O 1 E800 0000 1 E800 FFFF 64KB
+		 * I/O 1 E880 0000 1 EBFF FFFF 56MB
 		 */
-		ranges = <02000000 0 80000000 1 80000000 0 10000000
-			  01000000 0 00000000 1 e8000000 0 00100000>;
+		ranges = <02000000 0 80000000 1 80000000 0 40000000
+			  01000000 0 00000000 1 e8000000 0 00010000
+			  01000000 0 00000000 1 e8800000 0 03800000>;
 
 		/* Inbound 2GB range starting at 0 */
 		dma-ranges = <42000000 0 0 0 0 0 80000000>;
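Note: read with the standard PCI bus binding (a 3-cell PCI address, a 2-cell parent address and a 2-cell size per entry, with 0x02000000 marking 32-bit memory space and 0x01000000 I/O space), the new sizes match the comment added above:

	0 40000000 -> 0x40000000 bytes = 1GB   PCI memory at PLB 1 8000 0000
	0 00010000 -> 0x00010000 bytes = 64KB  I/O at PLB 1 e800 0000
	0 03800000 -> 0x03800000 bytes = 56MB  I/O at PLB 1 e880 0000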
@@ -16,7 +16,6 @@
 #include <asm/mmu.h>
 #include <asm/pgtable.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/processor.h>
 #include <asm/udbg.h>
 
@@ -1208,6 +1208,18 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.machine_check = machine_check_4xx,
 		.platform = "ppc405",
 	},
+	{ /* default match */
+		.pvr_mask = 0x00000000,
+		.pvr_value = 0x00000000,
+		.cpu_name = "(generic 40x PPC)",
+		.cpu_features = CPU_FTRS_40X,
+		.cpu_user_features = PPC_FEATURE_32 |
+			PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+		.icache_bsize = 32,
+		.dcache_bsize = 32,
+		.machine_check = machine_check_4xx,
+		.platform = "ppc405",
+	}
 
 #endif /* CONFIG_40x */
 #ifdef CONFIG_44x
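Note: the new { /* default match */ } entry relies on the cputable being scanned in order, so a pvr_mask of 0 catches any PVR that earlier entries did not. A simplified sketch of that selection logic (assumed from the table layout above, not the kernel's exact identify_cpu() code):

	/* sketch: first entry whose masked PVR matches wins */
	static struct cpu_spec *find_spec(struct cpu_spec *specs, int n,
					  unsigned int pvr)
	{
		int i;

		for (i = 0; i < n; i++)
			if ((pvr & specs[i].pvr_mask) == specs[i].pvr_value)
				return &specs[i];
		return NULL;	/* unreachable once a default entry exists */
	}

This is also why each family's default entry has to stay last inside its #ifdef block.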
@@ -1421,8 +1433,18 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.machine_check = machine_check_440A,
 		.platform = "ppc440",
 	},
+	{ /* default match */
+		.pvr_mask = 0x00000000,
+		.pvr_value = 0x00000000,
+		.cpu_name = "(generic 44x PPC)",
+		.cpu_features = CPU_FTRS_44X,
+		.cpu_user_features = COMMON_USER_BOOKE,
+		.icache_bsize = 32,
+		.dcache_bsize = 32,
+		.machine_check = machine_check_4xx,
+		.platform = "ppc440",
+	}
 #endif /* CONFIG_44x */
-#ifdef CONFIG_FSL_BOOKE
 #ifdef CONFIG_E200
 	{ /* e200z5 */
 		.pvr_mask = 0xfff00000,
@@ -1451,7 +1473,19 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.machine_check = machine_check_e200,
 		.platform = "ppc5554",
 	},
-#elif defined(CONFIG_E500)
+	{ /* default match */
+		.pvr_mask = 0x00000000,
+		.pvr_value = 0x00000000,
+		.cpu_name = "(generic E200 PPC)",
+		.cpu_features = CPU_FTRS_E200,
+		.cpu_user_features = COMMON_USER_BOOKE |
+			PPC_FEATURE_HAS_EFP_SINGLE |
+			PPC_FEATURE_UNIFIED_CACHE,
+		.dcache_bsize = 32,
+		.machine_check = machine_check_e200,
+		.platform = "ppc5554",
+	}
+#endif /* CONFIG_E200 */
+#ifdef CONFIG_E500
 	{ /* e500 */
 		.pvr_mask = 0xffff0000,
 		.pvr_value = 0x80200000,
@@ -1487,20 +1521,19 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.machine_check = machine_check_e500,
 		.platform = "ppc8548",
 	},
-#endif
-#endif
-#if !CLASSIC_PPC
 	{ /* default match */
 		.pvr_mask = 0x00000000,
 		.pvr_value = 0x00000000,
-		.cpu_name = "(generic PPC)",
-		.cpu_features = CPU_FTRS_GENERIC_32,
-		.cpu_user_features = PPC_FEATURE_32,
+		.cpu_name = "(generic E500 PPC)",
+		.cpu_features = CPU_FTRS_E500,
+		.cpu_user_features = COMMON_USER_BOOKE |
+			PPC_FEATURE_HAS_SPE_COMP |
+			PPC_FEATURE_HAS_EFP_SINGLE_COMP,
 		.icache_bsize = 32,
 		.dcache_bsize = 32,
+		.machine_check = machine_check_e500,
 		.platform = "powerpc",
 	}
-#endif /* !CLASSIC_PPC */
+#endif /* CONFIG_E500 */
 #endif /* CONFIG_PPC32 */
 };
 
@@ -653,7 +653,14 @@ finish_tlb_load:
 	rlwimi r10, r11, 0, 26, 26 /* UX = HWEXEC & USER */
 
 	rlwimi r12, r10, 0, 26, 31 /* Insert static perms */
-	rlwinm r12, r12, 0, 20, 15 /* Clear U0-U3 */
+
+	/*
+	 * Clear U0-U3 and WL1 IL1I IL1D IL2I IL2D bits which are added
+	 * on newer 440 cores like the 440x6 used on AMCC 460EX/460GT (see
+	 * include/asm-powerpc/pgtable-ppc32.h for details).
+	 */
+	rlwinm r12, r12, 0, 20, 10
 
 	tlbwe r12, r13, PPC44x_TLB_ATTRIB /* Write ATTRIB */
 
 	/* Done...restore registers and get out of here.
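Note on the rotate-and-mask, for readers decoding the change: rlwinm rD,rS,0,MB,ME keeps bit range MB..ME in IBM numbering (bit 0 is the MSB), and a wrapping mask with MB > ME keeps both ends of the word:

	rlwinm r12, r12, 0, 20, 15  -> keeps 20..31 and 0..15, clears 16..19 (U0-U3)
	rlwinm r12, r12, 0, 20, 10  -> keeps 20..31 and 0..10, clears 11..19
	                               (U0-U3 plus WL1 IL1I IL1D IL2I IL2D)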
@@ -1517,10 +1517,6 @@ _INIT_STATIC(start_here_multiplatform)
 	addi r2,r2,0x4000
 	add r2,r2,r26
 
-	/* Set initial ptr to current */
-	LOAD_REG_IMMEDIATE(r4, init_task)
-	std r4,PACACURRENT(r13)
-
 	/* Do very early kernel initializations, including initial hash table,
 	 * stab and slb setup before we turn on relocation. */
 
@@ -108,9 +108,6 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
 	if (size > 0x10000)
 		size = 0x10000;
 
-	printk(KERN_ERR "no ISA IO ranges or unexpected isa range, "
-	       "mapping 64k\n");
-
 	__ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
 		     size, _PAGE_NO_CACHE|_PAGE_GUARDED);
 	return;
@@ -170,6 +170,8 @@ void __init setup_paca(int cpu)
 
 void __init early_setup(unsigned long dt_ptr)
 {
+	/* -------- printk is _NOT_ safe to use here ! ------- */
+
 	/* Fill in any unititialised pacas */
 	initialise_pacas();
 
@@ -179,12 +181,14 @@ void __init early_setup(unsigned long dt_ptr)
 	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
 	setup_paca(0);
 
-	/* Enable early debugging if any specified (see udbg.h) */
-	udbg_early_init();
-
 	/* Initialize lockdep early or else spinlocks will blow */
 	lockdep_init();
 
+	/* -------- printk is now safe to use ------- */
+
+	/* Enable early debugging if any specified (see udbg.h) */
+	udbg_early_init();
+
 	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);
 
 	/*
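Taken together with the previous hunk, the intended ordering in early_setup() looks like this (a summary of the two hunks, not additional code):

	setup_paca(0);		/* printk is NOT safe yet */
	lockdep_init();		/* spinlock debugging becomes valid here */
	udbg_early_init();	/* may printk, so it now runs after lockdep */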
@@ -35,6 +35,7 @@
 #include <linux/percpu.h>
 #include <linux/types.h>
 #include <linux/ioport.h>
+#include <linux/kernel_stat.h>
 
 #include <asm/io.h>
 #include <asm/pgtable.h>
@@ -231,6 +232,54 @@ static int iic_host_match(struct irq_host *h, struct device_node *node)
 				    "IBM,CBEA-Internal-Interrupt-Controller");
 }
 
+extern int noirqdebug;
+
+static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
+{
+	const unsigned int cpu = smp_processor_id();
+
+	spin_lock(&desc->lock);
+
+	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
+
+	/*
+	 * If we're currently running this IRQ, or its disabled,
+	 * we shouldn't process the IRQ. Mark it pending, handle
+	 * the necessary masking and go out
+	 */
+	if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
+		    !desc->action)) {
+		desc->status |= IRQ_PENDING;
+		goto out_eoi;
+	}
+
+	kstat_cpu(cpu).irqs[irq]++;
+
+	/* Mark the IRQ currently in progress.*/
+	desc->status |= IRQ_INPROGRESS;
+
+	do {
+		struct irqaction *action = desc->action;
+		irqreturn_t action_ret;
+
+		if (unlikely(!action))
+			goto out_eoi;
+
+		desc->status &= ~IRQ_PENDING;
+		spin_unlock(&desc->lock);
+		action_ret = handle_IRQ_event(irq, action);
+		if (!noirqdebug)
+			note_interrupt(irq, desc, action_ret);
+		spin_lock(&desc->lock);
+
+	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
+
+	desc->status &= ~IRQ_INPROGRESS;
+out_eoi:
+	desc->chip->eoi(irq);
+	spin_unlock(&desc->lock);
+}
+
 static int iic_host_map(struct irq_host *h, unsigned int virq,
 			irq_hw_number_t hw)
 {
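Compared with the generic handle_fasteoi_irq, the handler above still reaches out_eoi when the interrupt is busy or disabled, and replays desc->action for as long as IRQ_PENDING is set without IRQ_DISABLED. A toy user-space model of that replay condition (illustrative only, not kernel code):

	#include <stdio.h>

	#define IRQ_PENDING	0x1
	#define IRQ_DISABLED	0x2

	int main(void)
	{
		unsigned int status = IRQ_PENDING;
		int runs = 0;

		do {
			status &= ~IRQ_PENDING;		/* claim the pending event */
			runs++;
			if (runs == 1)
				status |= IRQ_PENDING;	/* simulate a re-trigger */
		} while ((status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);

		printf("handler body ran %d times\n", runs);	/* prints 2 */
		return 0;
	}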
@@ -240,10 +289,10 @@ static int iic_host_map(struct irq_host *h, unsigned int virq,
 		break;
 	case IIC_IRQ_TYPE_IOEXC:
 		set_irq_chip_and_handler(virq, &iic_ioexc_chip,
-					 handle_fasteoi_irq);
+					 handle_iic_irq);
 		break;
 	default:
-		set_irq_chip_and_handler(virq, &iic_chip, handle_fasteoi_irq);
+		set_irq_chip_and_handler(virq, &iic_chip, handle_iic_irq);
 	}
 	return 0;
 }
@@ -141,6 +141,10 @@ static void spu_restart_dma(struct spu *spu)
 
 	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
 		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
+	else {
+		set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
+		mb();
+	}
 }
 
 static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
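One reading of how this else branch pays off, pieced together from the switch.c hunks later in this merge (a summary, not new code):

	/* assumed flow across this merge's hunks:
	 * spu_restart_dma():   set_bit(SPU_CONTEXT_FAULT_PENDING)
	 * set_switch_active(): fault pending -> csa->priv2.mfc_control_RW
	 *                      |= MFC_CNTL_RESTART_DMA_COMMAND
	 * restore_mfc_cntl():  writes the saved value back, replaying it */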
@@ -226,11 +230,13 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 		return 0;
 	}
 
-	spu->class_0_pending = 0;
-	spu->dar = ea;
-	spu->dsisr = dsisr;
+	spu->class_1_dar = ea;
+	spu->class_1_dsisr = dsisr;
+
+	spu->stop_callback(spu, 1);
 
-	spu->stop_callback(spu);
+	spu->class_1_dar = 0;
+	spu->class_1_dsisr = 0;
 
 	return 0;
 }
@@ -318,11 +324,15 @@ spu_irq_class_0(int irq, void *data)
 	stat = spu_int_stat_get(spu, 0) & mask;
 
 	spu->class_0_pending |= stat;
-	spu->dsisr = spu_mfc_dsisr_get(spu);
-	spu->dar = spu_mfc_dar_get(spu);
+	spu->class_0_dsisr = spu_mfc_dsisr_get(spu);
+	spu->class_0_dar = spu_mfc_dar_get(spu);
 	spin_unlock(&spu->register_lock);
 
-	spu->stop_callback(spu);
+	spu->stop_callback(spu, 0);
+
+	spu->class_0_pending = 0;
+	spu->class_0_dsisr = 0;
+	spu->class_0_dar = 0;
 
 	spu_int_stat_clear(spu, 0, stat);
 
@@ -363,6 +373,9 @@ spu_irq_class_1(int irq, void *data)
 	if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
 		;
 
+	spu->class_1_dsisr = 0;
+	spu->class_1_dar = 0;
+
 	return stat ? IRQ_HANDLED : IRQ_NONE;
 }
 
@@ -396,10 +409,10 @@ spu_irq_class_2(int irq, void *data)
 		spu->ibox_callback(spu);
 
 	if (stat & CLASS2_SPU_STOP_INTR)
-		spu->stop_callback(spu);
+		spu->stop_callback(spu, 2);
 
 	if (stat & CLASS2_SPU_HALT_INTR)
-		spu->stop_callback(spu);
+		spu->stop_callback(spu, 2);
 
 	if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
 		spu->mfc_callback(spu);
 
@@ -28,6 +28,7 @@
 #include <linux/io.h>
 #include <linux/mutex.h>
 #include <linux/device.h>
+#include <linux/sched.h>
 
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
@@ -75,8 +76,19 @@ static u64 int_stat_get(struct spu *spu, int class)
 
 static void cpu_affinity_set(struct spu *spu, int cpu)
 {
-	u64 target = iic_get_target_id(cpu);
-	u64 route = target << 48 | target << 32 | target << 16;
+	u64 target;
+	u64 route;
+
+	if (nr_cpus_node(spu->node)) {
+		cpumask_t spumask = node_to_cpumask(spu->node);
+		cpumask_t cpumask = node_to_cpumask(cpu_to_node(cpu));
+
+		if (!cpus_intersects(spumask, cpumask))
+			return;
+	}
+
+	target = iic_get_target_id(cpu);
+	route = target << 48 | target << 32 | target << 16;
 	out_be64(&spu->priv1->int_route_RW, route);
 }
 
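The route value packs the delivery target into three 16-bit lanes of the interrupt-route register (by the shifts, presumably one lane per interrupt class). A worked example with a hypothetical target id of 0x5 (illustrative value only):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t target = 0x5;	/* hypothetical iic target id */
		uint64_t route = target << 48 | target << 32 | target << 16;

		/* prints route = 0x0005000500050000 */
		printf("route = 0x%016llx\n", (unsigned long long)route);
		return 0;
	}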
@@ -83,13 +83,18 @@ int spufs_handle_class0(struct spu_context *ctx)
 		return 0;
 
 	if (stat & CLASS0_DMA_ALIGNMENT_INTR)
-		spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_DMA_ALIGNMENT);
+		spufs_handle_event(ctx, ctx->csa.class_0_dar,
+			SPE_EVENT_DMA_ALIGNMENT);
 
 	if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
-		spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_INVALID_DMA);
+		spufs_handle_event(ctx, ctx->csa.class_0_dar,
+			SPE_EVENT_INVALID_DMA);
 
 	if (stat & CLASS0_SPU_ERROR_INTR)
-		spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_SPE_ERROR);
+		spufs_handle_event(ctx, ctx->csa.class_0_dar,
+			SPE_EVENT_SPE_ERROR);
+
+	ctx->csa.class_0_pending = 0;
 
 	return -EIO;
 }
@@ -119,8 +124,8 @@ int spufs_handle_class1(struct spu_context *ctx)
 	 * in time, we can still expect to get the same fault
 	 * the immediately after the context restore.
 	 */
-	ea = ctx->csa.dar;
-	dsisr = ctx->csa.dsisr;
+	ea = ctx->csa.class_1_dar;
+	dsisr = ctx->csa.class_1_dsisr;
 
 	if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
 		return 0;
@@ -158,7 +163,7 @@ int spufs_handle_class1(struct spu_context *ctx)
 	 * time slicing will not preempt the context while the page fault
 	 * handler is running. Context switch code removes mappings.
 	 */
-	ctx->csa.dar = ctx->csa.dsisr = 0;
+	ctx->csa.class_1_dar = ctx->csa.class_1_dsisr = 0;
 
 	/*
 	 * If we handled the fault successfully and are in runnable
@@ -23,6 +23,7 @@
 
 #include <linux/file.h>
 #include <linux/fs.h>
+#include <linux/fsnotify.h>
 #include <linux/backing-dev.h>
 #include <linux/init.h>
 #include <linux/ioctl.h>
@@ -223,7 +224,7 @@ static int spufs_dir_close(struct inode *inode, struct file *file)
 	parent = dir->d_parent->d_inode;
 	ctx = SPUFS_I(dir->d_inode)->i_ctx;
 
-	mutex_lock(&parent->i_mutex);
+	mutex_lock_nested(&parent->i_mutex, I_MUTEX_PARENT);
 	ret = spufs_rmdir(parent, dir);
 	mutex_unlock(&parent->i_mutex);
 	WARN_ON(ret);
@@ -618,12 +619,15 @@ long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode,
 	mode &= ~current->fs->umask;
 
 	if (flags & SPU_CREATE_GANG)
-		return spufs_create_gang(nd->path.dentry->d_inode,
+		ret = spufs_create_gang(nd->path.dentry->d_inode,
 					 dentry, nd->path.mnt, mode);
 	else
-		return spufs_create_context(nd->path.dentry->d_inode,
+		ret = spufs_create_context(nd->path.dentry->d_inode,
 					    dentry, nd->path.mnt, flags, mode,
 					    filp);
+	if (ret >= 0)
+		fsnotify_mkdir(nd->path.dentry->d_inode, dentry);
+	return ret;
 
 out_dput:
 	dput(dentry);
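With fsnotify_mkdir() in place, spu_create() now generates the inotify IN_CREATE event the commit title refers to. A hypothetical user-space watcher (illustrative; assumes spufs is mounted at /spu, which is conventional but site-specific):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/inotify.h>

	int main(void)
	{
		char buf[4096];
		ssize_t len;
		int fd = inotify_init();

		if (fd < 0 || inotify_add_watch(fd, "/spu", IN_CREATE) < 0)
			return 1;
		len = read(fd, buf, sizeof(buf));	/* blocks until an event */
		if (len > 0) {
			struct inotify_event *ev = (struct inotify_event *)buf;
			printf("created: %s\n", ev->len ? ev->name : "?");
		}
		return 0;
	}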
@@ -11,7 +11,7 @@
 #include "spufs.h"
 
 /* interrupt-level stop callback function. */
-void spufs_stop_callback(struct spu *spu)
+void spufs_stop_callback(struct spu *spu, int irq)
 {
 	struct spu_context *ctx = spu->ctx;
 
@@ -24,9 +24,19 @@ void spufs_stop_callback(struct spu *spu)
 	 */
 	if (ctx) {
 		/* Copy exception arguments into module specific structure */
-		ctx->csa.class_0_pending = spu->class_0_pending;
-		ctx->csa.dsisr = spu->dsisr;
-		ctx->csa.dar = spu->dar;
+		switch(irq) {
+		case 0 :
+			ctx->csa.class_0_pending = spu->class_0_pending;
+			ctx->csa.class_0_dsisr = spu->class_0_dsisr;
+			ctx->csa.class_0_dar = spu->class_0_dar;
+			break;
+		case 1 :
+			ctx->csa.class_1_dsisr = spu->class_1_dsisr;
+			ctx->csa.class_1_dar = spu->class_1_dar;
+			break;
+		case 2 :
+			break;
+		}
 
 		/* ensure that the exception status has hit memory before a
 		 * thread waiting on the context's stop queue is woken */
@@ -34,11 +44,6 @@ void spufs_stop_callback(struct spu *spu)
 
 		wake_up_all(&ctx->stop_wq);
 	}
-
-	/* Clear callback arguments from spu structure */
-	spu->class_0_pending = 0;
-	spu->dsisr = 0;
-	spu->dar = 0;
 }
 
 int spu_stopped(struct spu_context *ctx, u32 *stat)
@@ -56,7 +61,11 @@ int spu_stopped(struct spu_context *ctx, u32 *stat)
 	if (!(*stat & SPU_STATUS_RUNNING) && (*stat & stopped))
 		return 1;
 
-	dsisr = ctx->csa.dsisr;
+	dsisr = ctx->csa.class_0_dsisr;
+	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
+		return 1;
+
+	dsisr = ctx->csa.class_1_dsisr;
 	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
 		return 1;
 
@@ -294,7 +303,7 @@ static int spu_process_callback(struct spu_context *ctx)
 	u32 ls_pointer, npc;
 	void __iomem *ls;
 	long spu_ret;
-	int ret, ret2;
+	int ret;
 
 	/* get syscall block from local store */
 	npc = ctx->ops->npc_read(ctx) & ~3;
@@ -316,11 +325,9 @@ static int spu_process_callback(struct spu_context *ctx)
 		if (spu_ret <= -ERESTARTSYS) {
 			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
 		}
-		ret2 = spu_acquire(ctx);
+		mutex_lock(&ctx->state_mutex);
 		if (ret == -ERESTARTSYS)
 			return ret;
-		if (ret2)
-			return -EINTR;
 	}
 
 	/* need to re-get the ls, as it may have changed when we released the
@@ -343,13 +350,14 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
 	if (mutex_lock_interruptible(&ctx->run_mutex))
 		return -ERESTARTSYS;
 
-	spu_enable_spu(ctx);
 	ctx->event_return = 0;
 
 	ret = spu_acquire(ctx);
 	if (ret)
 		goto out_unlock;
 
+	spu_enable_spu(ctx);
+
 	spu_update_sched_info(ctx);
 
 	ret = spu_run_init(ctx, npc);
@@ -140,6 +140,9 @@ void __spu_update_sched_info(struct spu_context *ctx)
 	 * if it is timesliced or preempted.
 	 */
 	ctx->cpus_allowed = current->cpus_allowed;
+
+	/* Save the current cpu id for spu interrupt routing. */
+	ctx->last_ran = raw_smp_processor_id();
 }
 
 void spu_update_sched_info(struct spu_context *ctx)
@@ -243,7 +246,6 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 	spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
 	spu_restore(&ctx->csa, spu);
 	spu->timestamp = jiffies;
-	spu_cpu_affinity_set(spu, raw_smp_processor_id());
 	spu_switch_notify(spu, ctx);
 	ctx->state = SPU_STATE_RUNNABLE;
 
@@ -657,7 +659,8 @@ static struct spu *find_victim(struct spu_context *ctx)
 
 			victim->stats.invol_ctx_switch++;
 			spu->stats.invol_ctx_switch++;
-			spu_add_to_rq(victim);
+			if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
+				spu_add_to_rq(victim);
 
 			mutex_unlock(&victim->state_mutex);
 
@@ -121,6 +121,7 @@ struct spu_context {
 	cpumask_t cpus_allowed;
 	int policy;
 	int prio;
+	int last_ran;
 
 	/* statistics */
 	struct {
@@ -331,7 +332,7 @@ size_t spu_ibox_read(struct spu_context *ctx, u32 *data);
 /* irq callback funcs. */
 void spufs_ibox_callback(struct spu *spu);
 void spufs_wbox_callback(struct spu *spu);
-void spufs_stop_callback(struct spu *spu);
+void spufs_stop_callback(struct spu *spu, int irq);
 void spufs_mfc_callback(struct spu *spu);
 void spufs_dma_callback(struct spu *spu, int type);
 
@@ -132,6 +132,14 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
 	spu_int_mask_set(spu, 2, 0ul);
 	eieio();
 	spin_unlock_irq(&spu->register_lock);
+
+	/*
+	 * This flag needs to be set before calling synchronize_irq so
+	 * that the update will be visible to the relevant handlers
+	 * via a simple load.
+	 */
+	set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
+	clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
 	synchronize_irq(spu->irqs[0]);
 	synchronize_irq(spu->irqs[1]);
 	synchronize_irq(spu->irqs[2]);
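The comment in the hunk spells out the contract; as a summary of the ordering the surrounding hunks appear to rely on (not additional code):

	/* 1. set_bit(SPU_CONTEXT_SWITCH_PENDING)  -- before synchronize_irq()
	 * 2. synchronize_irq(irqs[0..2])          -- running handlers drain,
	 *                                            later ones see the flag
	 * 3. handlers defer DMA restarts via SPU_CONTEXT_FAULT_PENDING
	 * 4. set_switch_active() folds the restart into the saved CSA */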
@@ -166,9 +174,8 @@ static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
 	/* Save, Step 7:
 	 * Restore, Step 5:
 	 *     Set a software context switch pending flag.
+	 *     Done above in Step 3 - disable_interrupts().
 	 */
-	set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
-	mb();
 }
 
 static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
@@ -186,20 +193,21 @@ static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
 				 MFC_CNTL_SUSPEND_COMPLETE);
 		/* fall through */
 	case MFC_CNTL_SUSPEND_COMPLETE:
-		if (csa) {
+		if (csa)
 			csa->priv2.mfc_control_RW =
-				MFC_CNTL_SUSPEND_MASK |
+				in_be64(&priv2->mfc_control_RW) |
 				MFC_CNTL_SUSPEND_DMA_QUEUE;
-		}
 		break;
 	case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
 		out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
 		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
 				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
 				 MFC_CNTL_SUSPEND_COMPLETE);
-		if (csa) {
-			csa->priv2.mfc_control_RW = 0;
-		}
+		if (csa)
+			csa->priv2.mfc_control_RW =
+				in_be64(&priv2->mfc_control_RW) &
+				~MFC_CNTL_SUSPEND_DMA_QUEUE &
+				~MFC_CNTL_SUSPEND_MASK;
 		break;
 	}
 }
@@ -249,16 +257,21 @@ static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
 		}
 	}
 
-static inline void save_mfc_decr(struct spu_state *csa, struct spu *spu)
+static inline void save_mfc_stopped_status(struct spu_state *csa,
+		struct spu *spu)
 {
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
+	const u64 mask = MFC_CNTL_DECREMENTER_RUNNING |
+			MFC_CNTL_DMA_QUEUES_EMPTY;
 
 	/* Save, Step 12:
 	 *     Read MFC_CNTL[Ds]. Update saved copy of
 	 *     CSA.MFC_CNTL[Ds].
+	 *
+	 * update: do the same with MFC_CNTL[Q].
 	 */
-	csa->priv2.mfc_control_RW |=
-		in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DECREMENTER_RUNNING;
+	csa->priv2.mfc_control_RW &= ~mask;
+	csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask;
 }
 
 static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
@@ -462,7 +475,9 @@ static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
 	 * Restore, Step 14.
 	 *     Write MFC_CNTL[Pc]=1 (purge queue).
 	 */
-	out_be64(&priv2->mfc_control_RW, MFC_CNTL_PURGE_DMA_REQUEST);
+	out_be64(&priv2->mfc_control_RW,
+			MFC_CNTL_PURGE_DMA_REQUEST |
+			MFC_CNTL_SUSPEND_MASK);
 	eieio();
 }
 
@@ -725,10 +740,14 @@ static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
 	/* Save, Step 48:
 	 * Restore, Step 23.
 	 *     Change the software context switch pending flag
-	 *     to context switch active.
+	 *     to context switch active. This implementation does
+	 *     not uses a switch active flag.
 	 *
-	 * This implementation does not uses a switch active flag.
+	 * Now that we have saved the mfc in the csa, we can add in the
+	 * restart command if an exception occurred.
 	 */
+	if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags))
+		csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
 	clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
 	mb();
 }
@@ -1690,6 +1709,13 @@ static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
 	eieio();
 }
 
+static inline void set_int_route(struct spu_state *csa, struct spu *spu)
+{
+	struct spu_context *ctx = spu->ctx;
+
+	spu_cpu_affinity_set(spu, ctx->last_ran);
+}
+
 static inline void restore_other_spu_access(struct spu_state *csa,
 					    struct spu *spu)
 {
@@ -1721,15 +1747,15 @@ static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
 	 */
 	out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
 	eieio();
 
 	/*
-	 * FIXME: this is to restart a DMA that we were processing
-	 *        before the save. better remember the fault information
-	 *        in the csa instead.
+	 * The queue is put back into the same state that was evident prior to
+	 * the context switch. The suspend flag is added to the saved state in
+	 * the csa, if the operational state was suspending or suspended. In
+	 * this case, the code that suspended the mfc is responsible for
+	 * continuing it. Note that SPE faults do not change the operational
+	 * state of the spu.
 	 */
-	if ((csa->priv2.mfc_control_RW & MFC_CNTL_SUSPEND_DMA_QUEUE_MASK)) {
-		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
-		eieio();
-	}
 }
 
 static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
@@ -1788,7 +1814,7 @@ static int quiece_spu(struct spu_state *prev, struct spu *spu)
 	save_spu_runcntl(prev, spu);	        /* Step 9. */
 	save_mfc_sr1(prev, spu);	        /* Step 10. */
 	save_spu_status(prev, spu);	        /* Step 11. */
-	save_mfc_decr(prev, spu);	        /* Step 12. */
+	save_mfc_stopped_status(prev, spu);     /* Step 12. */
 	halt_mfc_decr(prev, spu);	        /* Step 13. */
 	save_timebase(prev, spu);		/* Step 14. */
 	remove_other_spu_access(prev, spu);	/* Step 15. */
@@ -2000,6 +2026,7 @@ static void restore_csa(struct spu_state *next, struct spu *spu)
 	check_ppuint_mb_stat(next, spu);	/* Step 67. */
 	spu_invalidate_slbs(spu);		/* Modified Step 68. */
 	restore_mfc_sr1(next, spu);		/* Step 69. */
+	set_int_route(next, spu);		/* NEW */
 	restore_other_spu_access(next, spu);	/* Step 70. */
 	restore_spu_runcntl(next, spu);		/* Step 71. */
 	restore_mfc_cntl(next, spu);		/* Step 72. */
@@ -1387,28 +1387,59 @@ static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
 	resource_size_t size = res->end - res->start + 1;
 	u64 sa;
 
-	/* Calculate window size */
-	sa = (0xffffffffffffffffull << ilog2(size));;
-	if (res->flags & IORESOURCE_PREFETCH)
-		sa |= 0x8;
-
-	out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
-	out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa));
-
-	/* The setup of the split looks weird to me ... let's see if it works */
-	out_le32(mbase + PECFG_PIM0LAL, 0x00000000);
-	out_le32(mbase + PECFG_PIM0LAH, 0x00000000);
-	out_le32(mbase + PECFG_PIM1LAL, 0x00000000);
-	out_le32(mbase + PECFG_PIM1LAH, 0x00000000);
-	out_le32(mbase + PECFG_PIM01SAH, 0xffff0000);
-	out_le32(mbase + PECFG_PIM01SAL, 0x00000000);
+	if (port->endpoint) {
+		resource_size_t ep_addr = 0;
+		resource_size_t ep_size = 32 << 20;
+
+		/* Currently we map a fixed 64MByte window to PLB address
+		 * 0 (SDRAM). This should probably be configurable via a dts
+		 * property.
+		 */
+
+		/* Calculate window size */
+		sa = (0xffffffffffffffffull << ilog2(ep_size));;
+
+		/* Setup BAR0 */
+		out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
+		out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa) |
+			 PCI_BASE_ADDRESS_MEM_TYPE_64);
+
+		/* Disable BAR1 & BAR2 */
+		out_le32(mbase + PECFG_BAR1MPA, 0);
+		out_le32(mbase + PECFG_BAR2HMPA, 0);
+		out_le32(mbase + PECFG_BAR2LMPA, 0);
+
+		out_le32(mbase + PECFG_PIM01SAH, RES_TO_U32_HIGH(sa));
+		out_le32(mbase + PECFG_PIM01SAL, RES_TO_U32_LOW(sa));
+
+		out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(ep_addr));
+		out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(ep_addr));
+	} else {
+		/* Calculate window size */
+		sa = (0xffffffffffffffffull << ilog2(size));;
+		if (res->flags & IORESOURCE_PREFETCH)
+			sa |= 0x8;
+
+		out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
+		out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa));
+
+		/* The setup of the split looks weird to me ... let's see
+		 * if it works
+		 */
+		out_le32(mbase + PECFG_PIM0LAL, 0x00000000);
+		out_le32(mbase + PECFG_PIM0LAH, 0x00000000);
+		out_le32(mbase + PECFG_PIM1LAL, 0x00000000);
+		out_le32(mbase + PECFG_PIM1LAH, 0x00000000);
+		out_le32(mbase + PECFG_PIM01SAH, 0xffff0000);
+		out_le32(mbase + PECFG_PIM01SAL, 0x00000000);
+
+		out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start));
+		out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start));
+	}
 
 	/* Enable inbound mapping */
 	out_le32(mbase + PECFG_PIMEN, 0x1);
 
-	out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start));
-	out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start));
-
 	/* Enable I/O, Mem, and Busmaster cycles */
 	out_le16(mbase + PCI_COMMAND,
 		 in_le16(mbase + PCI_COMMAND) |
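The endpoint BAR mask arithmetic, worked out (note that ep_size = 32 << 20 is 32MB even though the comment in the new code says 64MByte; the mismatch is present in the merged source):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t ep_size = 32 << 20;			/* 0x02000000 = 32MB */
		int shift = 63 - __builtin_clzll(ep_size);	/* ilog2 -> 25 */
		uint64_t sa = 0xffffffffffffffffull << shift;

		/* prints sa = 0xfffffffffe000000 */
		printf("sa = 0x%016llx\n", (unsigned long long)sa);
		return 0;
	}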
@@ -1422,13 +1453,8 @@ static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port)
 	const int *bus_range;
 	int primary = 0, busses;
 	void __iomem *mbase = NULL, *cfg_data = NULL;
-	/* XXX FIXME: Handle endpoint mode properly */
-	if (port->endpoint) {
-		printk(KERN_WARNING "PCIE%d: Port in endpoint mode !\n",
-		       port->index);
-		return;
-	}
+	const u32 *pval;
+	u32 val;
 
 	/* Check if primary bridge */
 	if (of_get_property(port->node, "primary", NULL))
@@ -1462,21 +1488,30 @@ static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port)
 		hose->last_busno = hose->first_busno + busses;
 	}
 
-	/* We map the external config space in cfg_data and the host config
-	 * space in cfg_addr. External space is 1M per bus, internal space
-	 * is 4K
+	if (!port->endpoint) {
+		/* Only map the external config space in cfg_data for
+		 * PCIe root-complexes. External space is 1M per bus
+		 */
+		cfg_data = ioremap(port->cfg_space.start +
+				   (hose->first_busno + 1) * 0x100000,
+				   busses * 0x100000);
+		if (cfg_data == NULL) {
+			printk(KERN_ERR "%s: Can't map external config space !",
+			       port->node->full_name);
+			goto fail;
+		}
+		hose->cfg_data = cfg_data;
+	}
+
+	/* Always map the host config space in cfg_addr.
+	 * Internal space is 4K
 	 */
-	cfg_data = ioremap(port->cfg_space.start +
-			   (hose->first_busno + 1) * 0x100000,
-			   busses * 0x100000);
 	mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
-	if (cfg_data == NULL || mbase == NULL) {
-		printk(KERN_ERR "%s: Can't map config space !",
+	if (mbase == NULL) {
+		printk(KERN_ERR "%s: Can't map internal config space !",
 		       port->node->full_name);
 		goto fail;
 	}
 
-	hose->cfg_data = cfg_data;
 	hose->cfg_addr = mbase;
 
 	pr_debug("PCIE %s, bus %d..%d\n", port->node->full_name,
@@ -1489,12 +1524,14 @@ static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port)
 	port->hose = hose;
 	mbase = (void __iomem *)hose->cfg_addr;
 
-	/*
-	 * Set bus numbers on our root port
-	 */
-	out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno);
-	out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1);
-	out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno);
+	if (!port->endpoint) {
+		/*
+		 * Set bus numbers on our root port
+		 */
+		out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno);
+		out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1);
+		out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno);
+	}
 
 	/*
 	 * OMRs are already reset, also disable PIMs
|
||||||
ppc4xx_configure_pciex_PIMs(port, hose, mbase, &dma_window);
|
ppc4xx_configure_pciex_PIMs(port, hose, mbase, &dma_window);
|
||||||
|
|
||||||
/* The root complex doesn't show up if we don't set some vendor
|
/* The root complex doesn't show up if we don't set some vendor
|
||||||
* and device IDs into it. Those are the same bogus one that the
|
* and device IDs into it. The defaults below are the same bogus
|
||||||
* initial code in arch/ppc add. We might want to change that.
|
* one that the initial code in arch/ppc had. This can be
|
||||||
|
* overwritten by setting the "vendor-id/device-id" properties
|
||||||
|
* in the pciex node.
|
||||||
*/
|
*/
|
||||||
out_le16(mbase + 0x200, 0xaaa0 + port->index);
|
|
||||||
out_le16(mbase + 0x202, 0xbed0 + port->index);
|
|
||||||
|
|
||||||
/* Set Class Code to PCI-PCI bridge and Revision Id to 1 */
|
/* Get the (optional) vendor-/device-id from the device-tree */
|
||||||
out_le32(mbase + 0x208, 0x06040001);
|
pval = of_get_property(port->node, "vendor-id", NULL);
|
||||||
|
if (pval) {
|
||||||
|
val = *pval;
|
||||||
|
} else {
|
||||||
|
if (!port->endpoint)
|
||||||
|
val = 0xaaa0 + port->index;
|
||||||
|
else
|
||||||
|
val = 0xeee0 + port->index;
|
||||||
|
}
|
||||||
|
out_le16(mbase + 0x200, val);
|
||||||
|
|
||||||
|
pval = of_get_property(port->node, "device-id", NULL);
|
||||||
|
if (pval) {
|
||||||
|
val = *pval;
|
||||||
|
} else {
|
||||||
|
if (!port->endpoint)
|
||||||
|
val = 0xbed0 + port->index;
|
||||||
|
else
|
||||||
|
val = 0xfed0 + port->index;
|
||||||
|
}
|
||||||
|
out_le16(mbase + 0x202, val);
|
||||||
|
|
||||||
|
if (!port->endpoint) {
|
||||||
|
/* Set Class Code to PCI-PCI bridge and Revision Id to 1 */
|
||||||
|
out_le32(mbase + 0x208, 0x06040001);
|
||||||
|
|
||||||
|
printk(KERN_INFO "PCIE%d: successfully set as root-complex\n",
|
||||||
|
port->index);
|
||||||
|
} else {
|
||||||
|
/* Set Class Code to Processor/PPC */
|
||||||
|
out_le32(mbase + 0x208, 0x0b200001);
|
||||||
|
|
||||||
|
printk(KERN_INFO "PCIE%d: successfully set as endpoint\n",
|
||||||
|
port->index);
|
||||||
|
}
|
||||||
|
|
||||||
printk(KERN_INFO "PCIE%d: successfully set as root-complex\n",
|
|
||||||
port->index);
|
|
||||||
return;
|
return;
|
||||||
fail:
|
fail:
|
||||||
if (hose)
|
if (hose)
|
||||||
|
@@ -1542,6 +1611,7 @@ static void __init ppc4xx_probe_pciex_bridge(struct device_node *np)
 	const u32 *pval;
 	int portno;
 	unsigned int dcrs;
+	const char *val;
 
 	/* First, proceed to core initialization as we assume there's
 	 * only one PCIe core in the system
@@ -1573,8 +1643,20 @@ static void __init ppc4xx_probe_pciex_bridge(struct device_node *np)
 	}
 	port->sdr_base = *pval;
 
-	/* XXX Currently, we only support root complex mode */
-	port->endpoint = 0;
+	/* Check if device_type property is set to "pci" or "pci-endpoint".
+	 * Resulting from this setup this PCIe port will be configured
+	 * as root-complex or as endpoint.
+	 */
+	val = of_get_property(port->node, "device_type", NULL);
+	if (!strcmp(val, "pci-endpoint")) {
+		port->endpoint = 1;
+	} else if (!strcmp(val, "pci")) {
+		port->endpoint = 0;
+	} else {
+		printk(KERN_ERR "PCIE: missing or incorrect device_type for %s\n",
+		       np->full_name);
+		return;
+	}
 
 	/* Fetch config space registers address */
 	if (of_address_to_resource(np, 0, &port->cfg_space)) {
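So port mode is now chosen by the node's device_type. A hypothetical device-tree fragment (node name and unit address invented for illustration; only the property matters):

	pciex@d00000000 {
		device_type = "pci-endpoint";	/* or "pci" for root-complex */
		...
	};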
@@ -2842,9 +2842,11 @@ static void dump_spu_fields(struct spu *spu)
 	DUMP_FIELD(spu, "0x%lx", ls_size);
 	DUMP_FIELD(spu, "0x%x", node);
 	DUMP_FIELD(spu, "0x%lx", flags);
-	DUMP_FIELD(spu, "0x%lx", dar);
-	DUMP_FIELD(spu, "0x%lx", dsisr);
 	DUMP_FIELD(spu, "%d", class_0_pending);
+	DUMP_FIELD(spu, "0x%lx", class_0_dar);
+	DUMP_FIELD(spu, "0x%lx", class_0_dsisr);
+	DUMP_FIELD(spu, "0x%lx", class_1_dar);
+	DUMP_FIELD(spu, "0x%lx", class_1_dsisr);
 	DUMP_FIELD(spu, "0x%lx", irqs[0]);
 	DUMP_FIELD(spu, "0x%lx", irqs[1]);
 	DUMP_FIELD(spu, "0x%lx", irqs[2]);
@@ -209,6 +209,13 @@ extern int icache_44x_need_flush;
  *   0  1  2  3  4  ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
  *   -  -  -  -  -    - U0 U1 U2 U3 W  I  M  G  E   - UX UW UR SX SW SR
  *
+ * Newer 440 cores (440x6 as used on AMCC 460EX/460GT) have additional
+ * TLB2 storage attibute fields. Those are:
+ *
+ *   TLB2:
+ *   0...10    11   12   13   14   15   16...31
+ *   no change WL1  IL1I IL1D IL2I IL2D no change
+ *
  * There are some constrains and options, to decide mapping software bits
  * into TLB entry.
  *
@@ -100,6 +100,7 @@
 
 /* Flag indicating progress during context switch. */
 #define SPU_CONTEXT_SWITCH_PENDING	0UL
+#define SPU_CONTEXT_FAULT_PENDING	1UL
 
 struct spu_context;
 struct spu_runqueue;
@@ -128,9 +129,11 @@ struct spu {
 	unsigned int irqs[3];
 	u32 node;
 	u64 flags;
-	u64 dar;
-	u64 dsisr;
 	u64 class_0_pending;
+	u64 class_0_dar;
+	u64 class_0_dsisr;
+	u64 class_1_dar;
+	u64 class_1_dsisr;
 	size_t ls_size;
 	unsigned int slb_replace;
 	struct mm_struct *mm;
@@ -143,7 +146,7 @@ struct spu {
 
 	void (* wbox_callback)(struct spu *spu);
 	void (* ibox_callback)(struct spu *spu);
-	void (* stop_callback)(struct spu *spu);
+	void (* stop_callback)(struct spu *spu, int irq);
 	void (* mfc_callback)(struct spu *spu);
 
 	char irq_c0[8];
@@ -254,7 +254,8 @@ struct spu_state {
 	u64 spu_chnldata_RW[32];
 	u32 spu_mailbox_data[4];
 	u32 pu_mailbox_data[1];
-	u64 dar, dsisr, class_0_pending;
+	u64 class_0_dar, class_0_dsisr, class_0_pending;
+	u64 class_1_dar, class_1_dsisr;
 	unsigned long suspend_time;
 	spinlock_t register_lock;
 };
@@ -14,11 +14,7 @@
 
 #include <linux/i2c.h>
 
-#ifdef CONFIG_OF_I2C
-
 void of_register_i2c_devices(struct i2c_adapter *adap,
 			     struct device_node *adap_node);
 
-#endif /* CONFIG_OF_I2C */
-
 #endif /* __LINUX_OF_I2C_H */