Merge git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
* git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc:
  [POWERPC] cell: fix bugs found by sparse
  [POWERPC] spiderpic: enable new style devtree support
  [POWERPC] Update cell_defconfig
  [POWERPC] spufs: add infrastructure for finding elf objects
  [POWERPC] spufs: support new OF device tree format
  [POWERPC] spufs: add support for read/write on cntl
  [POWERPC] spufs: remove support for ancient firmware
  [POWERPC] spufs: make mailbox functions handle multiple elements
  [POWERPC] spufs: use correct pg_prot for mapping SPU local store
  [POWERPC] spufs: Add infrastructure needed for gang scheduling
  [POWERPC] spufs: implement error event delivery to user space
  [POWERPC] spufs: fix context switch during page fault
  [POWERPC] spufs: scheduler support for NUMA.
  [POWERPC] spufs: cell spu problem state mapping updates
commit a43cdf08a1
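Among the user-visible changes merged here, the spufs mailbox files now return as many 4-byte entries as fit in the caller's buffer instead of exactly one per read. A minimal user-space sketch of the new behavior follows; the `/spu/ctx0` path is an illustrative assumption (contexts are actually created with the `spu_create` syscall), not part of this commit:

```c
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical context directory; real contexts come from spu_create(). */
	int fd = open("/spu/ctx0/mbox", O_RDONLY);
	if (fd < 0)
		return 1;

	/* After this merge, one read() can drain several 4-byte mailbox
	 * entries; before it, each read() returned at most one. An empty
	 * mailbox still fails with EAGAIN. */
	uint32_t data[8];
	ssize_t n = read(fd, data, sizeof(data));
	for (ssize_t i = 0; i * (ssize_t)sizeof(uint32_t) < n; i++)
		printf("mbox entry: 0x%08x\n", data[i]);

	close(fd);
	return 0;
}
```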
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.18-rc6
-# Sun Sep 10 10:20:32 2006
+# Linux kernel version: 2.6.18
+# Wed Oct 4 15:30:50 2006
 #
 CONFIG_PPC64=y
 CONFIG_64BIT=y
@@ -22,6 +22,7 @@ CONFIG_ARCH_MAY_HAVE_PC_FDC=y
 CONFIG_PPC_OF=y
 CONFIG_PPC_UDBG_16550=y
 # CONFIG_GENERIC_TBSYNC is not set
+CONFIG_AUDIT_ARCH=y
 # CONFIG_DEFAULT_UIMAGE is not set

 #
@@ -52,10 +53,11 @@ CONFIG_LOCALVERSION=""
 CONFIG_LOCALVERSION_AUTO=y
 CONFIG_SWAP=y
 CONFIG_SYSVIPC=y
+# CONFIG_IPC_NS is not set
 # CONFIG_POSIX_MQUEUE is not set
 # CONFIG_BSD_PROCESS_ACCT is not set
 # CONFIG_TASKSTATS is not set
-CONFIG_SYSCTL=y
+# CONFIG_UTS_NS is not set
 # CONFIG_AUDIT is not set
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
@@ -63,7 +65,9 @@ CONFIG_CPUSETS=y
 # CONFIG_RELAY is not set
 CONFIG_INITRAMFS_SOURCE=""
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
 # CONFIG_EMBEDDED is not set
+# CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_ALL is not set
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -72,12 +76,12 @@ CONFIG_PRINTK=y
 CONFIG_BUG=y
 CONFIG_ELF_CORE=y
 CONFIG_BASE_FULL=y
+CONFIG_RT_MUTEXES=y
 CONFIG_FUTEX=y
 CONFIG_EPOLL=y
 CONFIG_SHMEM=y
 CONFIG_SLAB=y
 CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_RT_MUTEXES=y
 # CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 # CONFIG_SLOB is not set
@@ -96,6 +100,7 @@ CONFIG_STOP_MACHINE=y
 #
 # Block layer
 #
+CONFIG_BLOCK=y
 # CONFIG_BLK_DEV_IO_TRACE is not set

 #
@@ -115,12 +120,13 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
 # Platform support
 #
 CONFIG_PPC_MULTIPLATFORM=y
-# CONFIG_PPC_ISERIES is not set
 # CONFIG_EMBEDDED6xx is not set
 # CONFIG_APUS is not set
 # CONFIG_PPC_PSERIES is not set
+# CONFIG_PPC_ISERIES is not set
 # CONFIG_PPC_PMAC is not set
 # CONFIG_PPC_MAPLE is not set
+# CONFIG_PPC_PASEMI is not set
 CONFIG_PPC_CELL=y
 CONFIG_PPC_CELL_NATIVE=y
 CONFIG_PPC_IBM_CELL_BLADE=y
@@ -142,7 +148,6 @@ CONFIG_MMIO_NVRAM=y
 #
 CONFIG_SPU_FS=m
 CONFIG_SPU_BASE=y
-CONFIG_SPUFS_MMAP=y
 CONFIG_CBE_RAS=y

 #
@@ -158,7 +163,7 @@ CONFIG_PREEMPT_NONE=y
 CONFIG_PREEMPT_BKL=y
 CONFIG_BINFMT_ELF=y
 CONFIG_BINFMT_MISC=m
-CONFIG_FORCE_MAX_ZONEORDER=13
+CONFIG_FORCE_MAX_ZONEORDER=9
 # CONFIG_IOMMU_VMERGE is not set
 CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
 CONFIG_KEXEC=y
@@ -168,6 +173,7 @@ CONFIG_NUMA=y
 CONFIG_NODES_SHIFT=4
 CONFIG_ARCH_SELECT_MEMORY_MODEL=y
 CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_SELECT_MEMORY_MODEL=y
 # CONFIG_FLATMEM_MANUAL is not set
 # CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -178,12 +184,12 @@ CONFIG_HAVE_MEMORY_PRESENT=y
 # CONFIG_SPARSEMEM_STATIC is not set
 CONFIG_SPARSEMEM_EXTREME=y
 CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_SPARSE=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
 CONFIG_MIGRATION=y
 CONFIG_RESOURCES_64BIT=y
-CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
 CONFIG_ARCH_MEMORY_PROBE=y
-# CONFIG_PPC_64K_PAGES is not set
+CONFIG_PPC_64K_PAGES=y
 CONFIG_SCHED_SMT=y
 CONFIG_PROC_DEVICETREE=y
 # CONFIG_CMDLINE_BOOL is not set
@@ -201,6 +207,7 @@ CONFIG_GENERIC_ISA_DMA=y
 CONFIG_PCI=y
 CONFIG_PCI_DOMAINS=y
 CONFIG_PCIEPORTBUS=y
+# CONFIG_PCI_MULTITHREAD_PROBE is not set
 # CONFIG_PCI_DEBUG is not set

 #
@@ -228,6 +235,7 @@ CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM=y
 # CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
 # CONFIG_NET_KEY is not set
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
@@ -249,7 +257,8 @@ CONFIG_INET_XFRM_MODE_TUNNEL=y
 CONFIG_INET_DIAG=y
 CONFIG_INET_TCP_DIAG=y
 # CONFIG_TCP_CONG_ADVANCED is not set
-CONFIG_TCP_CONG_BIC=y
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"

 #
 # IP: Virtual Server Configuration
@@ -261,11 +270,15 @@ CONFIG_IPV6=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+# CONFIG_IPV6_MIP6 is not set
 CONFIG_INET6_XFRM_TUNNEL=m
 CONFIG_INET6_TUNNEL=m
 CONFIG_INET6_XFRM_MODE_TRANSPORT=y
 CONFIG_INET6_XFRM_MODE_TUNNEL=y
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
 CONFIG_IPV6_TUNNEL=m
+# CONFIG_IPV6_SUBTREES is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
 # CONFIG_NETWORK_SECMARK is not set
 CONFIG_NETFILTER=y
 # CONFIG_NETFILTER_DEBUG is not set
@@ -322,7 +335,6 @@ CONFIG_IP_NF_QUEUE=m
 # CONFIG_ATALK is not set
 # CONFIG_X25 is not set
 # CONFIG_LAPB is not set
-# CONFIG_NET_DIVERT is not set
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set

@@ -434,6 +446,7 @@ CONFIG_BLK_DEV_AEC62XX=y
 # CONFIG_BLK_DEV_CS5530 is not set
 # CONFIG_BLK_DEV_HPT34X is not set
 # CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_JMICRON is not set
 # CONFIG_BLK_DEV_SC1200 is not set
 # CONFIG_BLK_DEV_PIIX is not set
 # CONFIG_BLK_DEV_IT821X is not set
@@ -456,6 +469,12 @@ CONFIG_IDEDMA_AUTO=y
 #
 # CONFIG_RAID_ATTRS is not set
 # CONFIG_SCSI is not set
+# CONFIG_SCSI_NETLINK is not set
+
+#
+# Serial ATA (prod) and Parallel ATA (experimental) drivers
+#
+# CONFIG_ATA is not set

 #
 # Multi-device support (RAID and LVM)
@@ -470,6 +489,7 @@ CONFIG_MD_RAID1=m
 # CONFIG_MD_MULTIPATH is not set
 # CONFIG_MD_FAULTY is not set
 CONFIG_BLK_DEV_DM=m
+# CONFIG_DM_DEBUG is not set
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_MIRROR=m
@@ -504,7 +524,7 @@ CONFIG_NETDEVICES=y
 # CONFIG_DUMMY is not set
 CONFIG_BONDING=y
 # CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
+CONFIG_TUN=y

 #
 # ARCnet devices
@@ -552,7 +572,7 @@ CONFIG_SKGE=m
 # CONFIG_TIGON3 is not set
 # CONFIG_BNX2 is not set
 CONFIG_SPIDER_NET=m
 # CONFIG_MV643XX_ETH is not set
+# CONFIG_QLA3XXX is not set

 #
 # Ethernet (10000 Mbit)
@@ -599,6 +619,7 @@ CONFIG_SPIDER_NET=m
 # Input device support
 #
 CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set

 #
 # Userland interfaces
@@ -865,6 +886,7 @@ CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_INFINIBAND_ADDR_TRANS=y
 CONFIG_INFINIBAND_MTHCA=m
 CONFIG_INFINIBAND_MTHCA_DEBUG=y
+# CONFIG_INFINIBAND_AMSO1100 is not set
 CONFIG_INFINIBAND_IPOIB=m
 CONFIG_INFINIBAND_IPOIB_DEBUG=y
 CONFIG_INFINIBAND_IPOIB_DEBUG_DATA=y
@@ -916,7 +938,7 @@ CONFIG_INOTIFY_USER=y
 # CONFIG_QUOTA is not set
 CONFIG_DNOTIFY=y
 # CONFIG_AUTOFS_FS is not set
-# CONFIG_AUTOFS4_FS is not set
+CONFIG_AUTOFS4_FS=m
 # CONFIG_FUSE_FS is not set

 #
@@ -943,8 +965,10 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
 #
 CONFIG_PROC_FS=y
 CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
 CONFIG_SYSFS=y
 CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
 CONFIG_HUGETLBFS=y
 CONFIG_HUGETLB_PAGE=y
 CONFIG_RAMFS=y
@@ -1084,6 +1108,7 @@ CONFIG_PLIST=y
 # Kernel hacking
 #
 # CONFIG_PRINTK_TIME is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_MAGIC_SYSRQ=y
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_KERNEL=y
@@ -1102,6 +1127,7 @@ CONFIG_DEBUG_SPINLOCK_SLEEP=y
 # CONFIG_DEBUG_INFO is not set
 CONFIG_DEBUG_FS=y
 # CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_LIST is not set
 # CONFIG_FORCED_INLINING is not set
 # CONFIG_RCU_TORTURE_TEST is not set
 # CONFIG_DEBUG_STACKOVERFLOW is not set
@@ -1123,6 +1149,10 @@ CONFIG_IRQSTACKS=y
 # Cryptographic options
 #
 CONFIG_CRYPTO=y
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_BLKCIPHER=m
+CONFIG_CRYPTO_HASH=y
+# CONFIG_CRYPTO_MANAGER is not set
 CONFIG_CRYPTO_HMAC=y
 # CONFIG_CRYPTO_NULL is not set
 # CONFIG_CRYPTO_MD4 is not set
@@ -1132,6 +1162,8 @@ CONFIG_CRYPTO_SHA1=m
 # CONFIG_CRYPTO_SHA512 is not set
 # CONFIG_CRYPTO_WP512 is not set
 # CONFIG_CRYPTO_TGR192 is not set
+CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_CBC=m
 CONFIG_CRYPTO_DES=m
 # CONFIG_CRYPTO_BLOWFISH is not set
 # CONFIG_CRYPTO_TWOFISH is not set

@@ -16,11 +16,6 @@ config SPU_BASE
	bool
	default n

-config SPUFS_MMAP
-	bool
-	depends on SPU_FS && SPARSEMEM
-	default y
-
 config CBE_RAS
	bool "RAS features for bare metal Cell BE"
	default y

@@ -101,7 +101,7 @@ static void iic_ioexc_eoi(unsigned int irq)
 static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc,
			      struct pt_regs *regs)
 {
-	struct cbe_iic_regs *node_iic = desc->handler_data;
+	struct cbe_iic_regs __iomem *node_iic = (void __iomem *)desc->handler_data;
	unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
	unsigned long bits, ack;
	int cascade;
@@ -320,7 +320,7 @@ static int __init setup_iic(void)
	struct device_node *dn;
	struct resource r0, r1;
	unsigned int node, cascade, found = 0;
-	struct cbe_iic_regs *node_iic;
+	struct cbe_iic_regs __iomem *node_iic;
	const u32 *np;

	for (dn = NULL;
@@ -357,7 +357,11 @@ static int __init setup_iic(void)
		cascade = irq_create_mapping(iic_host, cascade);
		if (cascade == NO_IRQ)
			continue;
-		set_irq_data(cascade, node_iic);
+		/*
+		 * irq_data is a generic pointer that gets passed back
+		 * to us later, so the forced cast is fine.
+		 */
+		set_irq_data(cascade, (void __force *)node_iic);
		set_irq_chained_handler(cascade , iic_ioexc_cascade);
		out_be64(&node_iic->iic_ir,
			 (1 << 12) /* priority */ |

@@ -345,8 +345,8 @@ static int cell_map_iommu_hardcoded(int num_nodes)

	/* node 0 */
	iommu = &cell_iommus[0];
-	iommu->mapped_base = ioremap(0x20000511000, 0x1000);
-	iommu->mapped_mmio_base = ioremap(0x20000510000, 0x1000);
+	iommu->mapped_base = ioremap(0x20000511000ul, 0x1000);
+	iommu->mapped_mmio_base = ioremap(0x20000510000ul, 0x1000);

	enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);

@@ -358,8 +358,8 @@ static int cell_map_iommu_hardcoded(int num_nodes)

	/* node 1 */
	iommu = &cell_iommus[1];
-	iommu->mapped_base = ioremap(0x30000511000, 0x1000);
-	iommu->mapped_mmio_base = ioremap(0x30000510000, 0x1000);
+	iommu->mapped_base = ioremap(0x30000511000ul, 0x1000);
+	iommu->mapped_mmio_base = ioremap(0x30000510000ul, 0x1000);

	enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);

@@ -244,7 +244,6 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
	int imaplen, intsize, unit;
	struct device_node *iic;

-#if 0 /* Enable that when we have a way to retreive the node as well */
	/* First, we check wether we have a real "interrupts" in the device
	 * tree in case the device-tree is ever fixed
	 */
@@ -252,9 +251,8 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
	if (of_irq_map_one(pic->of_node, 0, &oirq) == 0) {
		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
					     oirq.size);
-		goto bail;
+		return virq;
	}
-#endif

	/* Now do the horrible hacks */
	tmp = get_property(pic->of_node, "#interrupt-cells", NULL);
@@ -369,7 +367,7 @@ void __init spider_init_IRQ(void)
		} else if (device_is_compatible(dn, "sti,platform-spider-pic")
			   && (chip < 2)) {
			static long hard_coded_pics[] =
-				{ 0x24000008000, 0x34000008000 };
+				{ 0x24000008000ul, 0x34000008000ul};
			r.start = hard_coded_pics[chip];
		} else
			continue;

@@ -25,11 +25,13 @@
 #include <linux/interrupt.h>
 #include <linux/list.h>
 #include <linux/module.h>
+#include <linux/pci.h>
 #include <linux/poll.h>
 #include <linux/ptrace.h>
 #include <linux/slab.h>
 #include <linux/wait.h>

+#include <asm/firmware.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <linux/mutex.h>
@@ -46,21 +48,21 @@ EXPORT_SYMBOL_GPL(spu_priv1_ops);
 static int __spu_trap_invalid_dma(struct spu *spu)
 {
	pr_debug("%s\n", __FUNCTION__);
-	force_sig(SIGBUS, /* info, */ current);
+	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
 }

 static int __spu_trap_dma_align(struct spu *spu)
 {
	pr_debug("%s\n", __FUNCTION__);
-	force_sig(SIGBUS, /* info, */ current);
+	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
 }

 static int __spu_trap_error(struct spu *spu)
 {
	pr_debug("%s\n", __FUNCTION__);
-	force_sig(SIGILL, /* info, */ current);
+	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
 }

@@ -317,7 +319,7 @@ static void spu_free_irqs(struct spu *spu)
		free_irq(spu->irqs[2], spu);
 }

-static LIST_HEAD(spu_list);
+static struct list_head spu_list[MAX_NUMNODES];
 static DEFINE_MUTEX(spu_mutex);

 static void spu_init_channels(struct spu *spu)
@@ -354,32 +356,42 @@ static void spu_init_channels(struct spu *spu)
	}
 }

-struct spu *spu_alloc(void)
+struct spu *spu_alloc_node(int node)
 {
-	struct spu *spu;
+	struct spu *spu = NULL;

	mutex_lock(&spu_mutex);
-	if (!list_empty(&spu_list)) {
-		spu = list_entry(spu_list.next, struct spu, list);
+	if (!list_empty(&spu_list[node])) {
+		spu = list_entry(spu_list[node].next, struct spu, list);
		list_del_init(&spu->list);
-		pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
-	} else {
-		pr_debug("No SPU left\n");
-		spu = NULL;
+		pr_debug("Got SPU %x %d %d\n",
			 spu->isrc, spu->number, spu->node);
+		spu_init_channels(spu);
	}
	mutex_unlock(&spu_mutex);

-	if (spu)
-		spu_init_channels(spu);
+	return spu;
+}
+EXPORT_SYMBOL_GPL(spu_alloc_node);
+
+struct spu *spu_alloc(void)
+{
+	struct spu *spu = NULL;
+	int node;
+
+	for (node = 0; node < MAX_NUMNODES; node++) {
+		spu = spu_alloc_node(node);
+		if (spu)
+			break;
+	}

	return spu;
 }
 EXPORT_SYMBOL_GPL(spu_alloc);

 void spu_free(struct spu *spu)
 {
	mutex_lock(&spu_mutex);
-	list_add_tail(&spu->list, &spu_list);
+	list_add_tail(&spu->list, &spu_list[spu->node]);
	mutex_unlock(&spu_mutex);
 }
 EXPORT_SYMBOL_GPL(spu_free);
@@ -566,7 +578,7 @@ static void spu_unmap(struct spu *spu)
 }

 /* This function shall be abstracted for HV platforms */
-static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
+static int __init spu_map_interrupts_old(struct spu *spu, struct device_node *np)
 {
	unsigned int isrc;
	const u32 *tmp;
@@ -590,7 +602,7 @@ static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
	return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
 }

-static int __init spu_map_device(struct spu *spu, struct device_node *node)
+static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
 {
	const char *prop;
	int ret;
@@ -635,6 +647,88 @@ out:
	return ret;
 }

+static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
+{
+	struct of_irq oirq;
+	int ret;
+	int i;
+
+	for (i=0; i < 3; i++) {
+		ret = of_irq_map_one(np, i, &oirq);
+		if (ret)
+			goto err;
+
+		ret = -EINVAL;
+		spu->irqs[i] = irq_create_of_mapping(oirq.controller,
+					oirq.specifier, oirq.size);
+		if (spu->irqs[i] == NO_IRQ)
+			goto err;
+	}
+	return 0;
+
+err:
+	pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier, spu->name);
+	for (; i >= 0; i--) {
+		if (spu->irqs[i] != NO_IRQ)
+			irq_dispose_mapping(spu->irqs[i]);
+	}
+	return ret;
+}
+
+static int spu_map_resource(struct device_node *node, int nr,
+		void __iomem** virt, unsigned long *phys)
+{
+	struct resource resource = { };
+	int ret;
+
+	ret = of_address_to_resource(node, 0, &resource);
+	if (ret)
+		goto out;
+
+	if (phys)
+		*phys = resource.start;
+	*virt = ioremap(resource.start, resource.end - resource.start);
+	if (!*virt)
+		ret = -EINVAL;
+
+out:
+	return ret;
+}
+
+static int __init spu_map_device(struct spu *spu, struct device_node *node)
+{
+	int ret = -ENODEV;
+	spu->name = get_property(node, "name", NULL);
+	if (!spu->name)
+		goto out;
+
+	ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
+					&spu->local_store_phys);
+	if (ret)
+		goto out;
+	ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
+					&spu->problem_phys);
+	if (ret)
+		goto out_unmap;
+	ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
+					NULL);
+	if (ret)
+		goto out_unmap;
+
+	if (!firmware_has_feature(FW_FEATURE_LPAR))
+		ret = spu_map_resource(node, 3, (void __iomem**)&spu->priv1,
+					NULL);
+	if (ret)
+		goto out_unmap;
+	return 0;
+
+out_unmap:
+	spu_unmap(spu);
+out:
+	pr_debug("failed to map spe %s: %d\n", spu->name, ret);
+	return ret;
+}
+
 struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu")
 };
@@ -688,6 +782,9 @@ static int __init create_spu(struct device_node *spe)
		goto out;

	ret = spu_map_device(spu, spe);
+	/* try old method */
+	if (ret)
+		ret = spu_map_device_old(spu, spe);
	if (ret)
		goto out_free;

@@ -696,6 +793,8 @@ static int __init create_spu(struct device_node *spe)
	if (spu->nid == -1)
		spu->nid = 0;
	ret = spu_map_interrupts(spu, spe);
+	if (ret)
+		ret = spu_map_interrupts_old(spu, spe);
	if (ret)
		goto out_unmap;
	spin_lock_init(&spu->register_lock);
@@ -706,13 +805,13 @@ static int __init create_spu(struct device_node *spe)
	spu->number = number++;
	ret = spu_request_irqs(spu);
	if (ret)
-		goto out_unmap;
+		goto out_unlock;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

-	list_add(&spu->list, &spu_list);
+	list_add(&spu->list, &spu_list[spu->node]);
	mutex_unlock(&spu_mutex);

	pr_debug(KERN_DEBUG "Using SPE %s %02x %p %p %p %p %d\n",
@@ -722,9 +821,9 @@ static int __init create_spu(struct device_node *spe)

 out_free_irqs:
	spu_free_irqs(spu);
-
-out_unmap:
+out_unlock:
	mutex_unlock(&spu_mutex);
+out_unmap:
	spu_unmap(spu);
 out_free:
	kfree(spu);
@@ -745,9 +844,13 @@ static void destroy_spu(struct spu *spu)
 static void cleanup_spu_base(void)
 {
	struct spu *spu, *tmp;
+	int node;
+
	mutex_lock(&spu_mutex);
-	list_for_each_entry_safe(spu, tmp, &spu_list, list)
-		destroy_spu(spu);
+	for (node = 0; node < MAX_NUMNODES; node++) {
+		list_for_each_entry_safe(spu, tmp, &spu_list[node], list)
+			destroy_spu(spu);
+	}
	mutex_unlock(&spu_mutex);
	sysdev_class_unregister(&spu_sysdev_class);
 }
@@ -756,13 +859,16 @@ module_exit(cleanup_spu_base);
 static int __init init_spu_base(void)
 {
	struct device_node *node;
-	int ret;
+	int i, ret;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		return ret;

+	for (i = 0; i < MAX_NUMNODES; i++)
+		INIT_LIST_HEAD(&spu_list[i]);
+
	ret = -ENODEV;
	for (node = of_find_node_by_type(NULL, "spe");
			node; node = of_find_node_by_type(node, "spe")) {
@@ -774,18 +880,6 @@ static int __init init_spu_base(void)
			break;
		}
	}
-	/* in some old firmware versions, the spe is called 'spc', so we
-	   look for that as well */
-	for (node = of_find_node_by_type(NULL, "spc");
-			node; node = of_find_node_by_type(node, "spc")) {
-		ret = create_spu(node);
-		if (ret) {
-			printk(KERN_WARNING "%s: Error initializing %s\n",
-				__FUNCTION__, node->name);
-			cleanup_spu_base();
-			break;
-		}
-	}
	return ret;
 }
 module_init(init_spu_base);

@@ -2,7 +2,7 @@ obj-y += switch.o

 obj-$(CONFIG_SPU_FS) += spufs.o
 spufs-y += inode.o file.o context.o syscalls.o
-spufs-y += sched.o backing_ops.o hw_ops.o run.o
+spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o

 # Rules to build switch.o with the help of SPU tool chain
 SPU_CROSS	:= spu-

@@ -27,7 +27,7 @@
 #include <asm/spu_csa.h>
 #include "spufs.h"

-struct spu_context *alloc_spu_context(void)
+struct spu_context *alloc_spu_context(struct spu_gang *gang)
 {
	struct spu_context *ctx;
	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
@@ -51,6 +51,8 @@ struct spu_context *alloc_spu_context(void)
	ctx->state = SPU_STATE_SAVED;
	ctx->ops = &spu_backing_ops;
	ctx->owner = get_task_mm(current);
+	if (gang)
+		spu_gang_add_ctx(gang, ctx);
	goto out;
 out_free:
	kfree(ctx);
@@ -67,6 +69,8 @@ void destroy_spu_context(struct kref *kref)
	spu_deactivate(ctx);
	up_write(&ctx->state_sema);
	spu_fini_csa(&ctx->csa);
+	if (ctx->gang)
+		spu_gang_remove_ctx(ctx->gang, ctx);
	kfree(ctx);
 }

@@ -36,6 +36,8 @@

 #include "spufs.h"

+#define SPUFS_MMAP_4K	(PAGE_SIZE == 0x1000)
+
 static int
 spufs_mem_open(struct inode *inode, struct file *file)
@@ -88,7 +90,6 @@ spufs_mem_write(struct file *file, const char __user *buffer,
	return ret;
 }

-#ifdef CONFIG_SPUFS_MMAP
 static struct page *
 spufs_mem_mmap_nopage(struct vm_area_struct *vma,
		      unsigned long address, int *type)
@@ -101,12 +102,16 @@ spufs_mem_mmap_nopage(struct vm_area_struct *vma,

	spu_acquire(ctx);

-	if (ctx->state == SPU_STATE_SAVED)
+	if (ctx->state == SPU_STATE_SAVED) {
+		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					& ~(_PAGE_NO_CACHE | _PAGE_GUARDED));
		page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
-	else
+	} else {
+		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					| _PAGE_NO_CACHE | _PAGE_GUARDED);
		page = pfn_to_page((ctx->spu->local_store_phys + offset)
				   >> PAGE_SHIFT);
-
+	}
	spu_release(ctx);

	if (type)
@@ -133,22 +138,19 @@ spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
 }
-#endif

 static struct file_operations spufs_mem_fops = {
	.open	 = spufs_mem_open,
	.read    = spufs_mem_read,
	.write   = spufs_mem_write,
	.llseek  = generic_file_llseek,
-#ifdef CONFIG_SPUFS_MMAP
	.mmap    = spufs_mem_mmap,
-#endif
 };

-#ifdef CONFIG_SPUFS_MMAP
 static struct page *spufs_ps_nopage(struct vm_area_struct *vma,
				    unsigned long address,
-				    int *type, unsigned long ps_offs)
+				    int *type, unsigned long ps_offs,
+				    unsigned long ps_size)
 {
	struct page *page = NOPAGE_SIGBUS;
	int fault_type = VM_FAULT_SIGBUS;
@@ -158,7 +160,7 @@ static struct page *spufs_ps_nopage(struct vm_area_struct *vma,
	int ret;

	offset += vma->vm_pgoff << PAGE_SHIFT;
-	if (offset >= 0x4000)
+	if (offset >= ps_size)
		goto out;

	ret = spu_acquire_runnable(ctx);
@@ -179,10 +181,11 @@ static struct page *spufs_ps_nopage(struct vm_area_struct *vma,
	return page;
 }

+#if SPUFS_MMAP_4K
 static struct page *spufs_cntl_mmap_nopage(struct vm_area_struct *vma,
					   unsigned long address, int *type)
 {
-	return spufs_ps_nopage(vma, address, type, 0x4000);
+	return spufs_ps_nopage(vma, address, type, 0x4000, 0x1000);
 }

 static struct vm_operations_struct spufs_cntl_mmap_vmops = {
@@ -191,17 +194,12 @@ static struct vm_operations_struct spufs_cntl_mmap_vmops = {

 /*
  * mmap support for problem state control area [0x4000 - 0x4fff].
- * Mapping this area requires that the application have CAP_SYS_RAWIO,
- * as these registers require special care when read/writing.
  */
 static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
 {
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

-	if (!capable(CAP_SYS_RAWIO))
-		return -EPERM;
-
	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
@@ -209,7 +207,30 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
 }
-#endif
+#else /* SPUFS_MMAP_4K */
+#define spufs_cntl_mmap NULL
+#endif /* !SPUFS_MMAP_4K */
+
+static u64 spufs_cntl_get(void *data)
+{
+	struct spu_context *ctx = data;
+	u64 val;
+
+	spu_acquire(ctx);
+	val = ctx->ops->status_read(ctx);
+	spu_release(ctx);
+
+	return val;
+}
+
+static void spufs_cntl_set(void *data, u64 val)
+{
+	struct spu_context *ctx = data;
+
+	spu_acquire(ctx);
+	ctx->ops->runcntl_write(ctx, val);
+	spu_release(ctx);
+}

 static int spufs_cntl_open(struct inode *inode, struct file *file)
 {
@@ -219,32 +240,15 @@ static int spufs_cntl_open(struct inode *inode, struct file *file)
	file->private_data = ctx;
	file->f_mapping = inode->i_mapping;
	ctx->cntl = inode->i_mapping;
-	return 0;
-}
-
-static ssize_t
-spufs_cntl_read(struct file *file, char __user *buffer,
-		size_t size, loff_t *pos)
-{
-	/* FIXME: read from spu status */
-	return -EINVAL;
-}
-
-static ssize_t
-spufs_cntl_write(struct file *file, const char __user *buffer,
-		 size_t size, loff_t *pos)
-{
-	/* FIXME: write to runctl bit */
-	return -EINVAL;
+	return simple_attr_open(inode, file, spufs_cntl_get,
+				spufs_cntl_set, "0x%08lx");
 }

 static struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
-	.read = spufs_cntl_read,
-	.write = spufs_cntl_write,
-#ifdef CONFIG_SPUFS_MMAP
+	.read = simple_attr_read,
+	.write = simple_attr_write,
	.mmap = spufs_cntl_mmap,
-#endif
 };

 static int
@@ -356,27 +360,54 @@ static int spufs_pipe_open(struct inode *inode, struct file *file)
	return nonseekable_open(inode, file);
 }

+/*
+ * Read as many bytes from the mailbox as possible, until
+ * one of the conditions becomes true:
+ *
+ * - no more data available in the mailbox
+ * - end of the user provided buffer
+ * - end of the mapped area
+ */
 static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
 {
	struct spu_context *ctx = file->private_data;
-	u32 mbox_data;
-	int ret;
+	u32 mbox_data, __user *udata;
+	ssize_t count;

	if (len < 4)
		return -EINVAL;

-	spu_acquire(ctx);
-	ret = ctx->ops->mbox_read(ctx, &mbox_data);
-	spu_release(ctx);
+	if (!access_ok(VERIFY_WRITE, buf, len))
+		return -EFAULT;

-	if (!ret)
-		return -EAGAIN;
+	udata = (void __user *)buf;

-	if (copy_to_user(buf, &mbox_data, sizeof mbox_data))
-		return -EFAULT;
+	spu_acquire(ctx);
+	for (count = 0; count <= len; count += 4, udata++) {
+		int ret;
+		ret = ctx->ops->mbox_read(ctx, &mbox_data);
+		if (ret == 0)
+			break;
+
+		/*
+		 * at the end of the mapped area, we can fault
+		 * but still need to return the data we have
+		 * read successfully so far.
+		 */
+		ret = __put_user(mbox_data, udata);
+		if (ret) {
+			if (!count)
+				count = -EFAULT;
+			break;
+		}
+	}
+	spu_release(ctx);

-	return 4;
+	if (!count)
+		count = -EAGAIN;
+
+	return count;
 }

 static struct file_operations spufs_mbox_fops = {
@@ -432,36 +463,70 @@ void spufs_ibox_callback(struct spu *spu)
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
 }

+/*
+ * Read as many bytes from the interrupt mailbox as possible, until
+ * one of the conditions becomes true:
+ *
+ * - no more data available in the mailbox
+ * - end of the user provided buffer
+ * - end of the mapped area
+ *
+ * If the file is opened without O_NONBLOCK, we wait here until
+ * any data is available, but return when we have been able to
+ * read something.
+ */
 static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
 {
	struct spu_context *ctx = file->private_data;
-	u32 ibox_data;
-	ssize_t ret;
+	u32 ibox_data, __user *udata;
+	ssize_t count;

	if (len < 4)
		return -EINVAL;

+	if (!access_ok(VERIFY_WRITE, buf, len))
+		return -EFAULT;
+
+	udata = (void __user *)buf;
+
	spu_acquire(ctx);

-	ret = 0;
+	/* wait only for the first element */
+	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data))
-			ret = -EAGAIN;
+			count = -EAGAIN;
	} else {
-		ret = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
+		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
	}
+	if (count)
+		goto out;

-	spu_release(ctx);
+	/* if we can't write at all, return -EFAULT */
+	count = __put_user(ibox_data, udata);
+	if (count)
+		goto out;

-	if (ret)
-		return ret;
+	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
+		int ret;
+		ret = ctx->ops->ibox_read(ctx, &ibox_data);
+		if (ret == 0)
+			break;
+		/*
+		 * at the end of the mapped area, we can fault
+		 * but still need to return the data we have
+		 * read successfully so far.
+		 */
+		ret = __put_user(ibox_data, udata);
+		if (ret)
+			break;
+	}

-	ret = 4;
-	if (copy_to_user(buf, &ibox_data, sizeof ibox_data))
-		ret = -EFAULT;
+out:
+	spu_release(ctx);

-	return ret;
+	return count;
 }

 static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
@@ -534,32 +599,67 @@ void spufs_wbox_callback(struct spu *spu)
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
 }

+/*
+ * Write as many bytes to the interrupt mailbox as possible, until
+ * one of the conditions becomes true:
+ *
+ * - the mailbox is full
+ * - end of the user provided buffer
+ * - end of the mapped area
+ *
+ * If the file is opened without O_NONBLOCK, we wait here until
+ * space is availabyl, but return when we have been able to
+ * write something.
+ */
 static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
 {
	struct spu_context *ctx = file->private_data;
-	u32 wbox_data;
-	int ret;
+	u32 wbox_data, __user *udata;
+	ssize_t count;

	if (len < 4)
		return -EINVAL;

-	if (copy_from_user(&wbox_data, buf, sizeof wbox_data))
+	udata = (void __user *)buf;
+	if (!access_ok(VERIFY_READ, buf, len))
+		return -EFAULT;
+
+	if (__get_user(wbox_data, udata))
		return -EFAULT;

	spu_acquire(ctx);

-	ret = 0;
+	/*
+	 * make sure we can at least write one element, by waiting
+	 * in case of !O_NONBLOCK
+	 */
+	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data))
-			ret = -EAGAIN;
+			count = -EAGAIN;
	} else {
-		ret = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
+		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
	}
+	if (count)
+		goto out;

-	spu_release(ctx);
+	/* write as much as possible */
+	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
+		int ret;
+		ret = __get_user(wbox_data, udata);
+		if (ret)
+			break;
+
+		ret = spu_wbox_write(ctx, wbox_data);
+		if (ret == 0)
+			break;
+	}

-	return ret ? ret : sizeof wbox_data;
+out:
+	spu_release(ctx);
+	return count;
 }

 static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
@@ -657,11 +757,19 @@ static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
	return 4;
 }

-#ifdef CONFIG_SPUFS_MMAP
 static struct page *spufs_signal1_mmap_nopage(struct vm_area_struct *vma,
					      unsigned long address, int *type)
 {
-	return spufs_ps_nopage(vma, address, type, 0x14000);
+#if PAGE_SIZE == 0x1000
+	return spufs_ps_nopage(vma, address, type, 0x14000, 0x1000);
+#elif PAGE_SIZE == 0x10000
+	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
+	 * signal 1 and 2 area
+	 */
+	return spufs_ps_nopage(vma, address, type, 0x10000, 0x10000);
+#else
+#error unsupported page size
+#endif
 }

 static struct vm_operations_struct spufs_signal1_mmap_vmops = {
@@ -680,15 +788,12 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
 }
-#endif

 static struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
-#ifdef CONFIG_SPUFS_MMAP
	.mmap = spufs_signal1_mmap,
-#endif
 };

 static int spufs_signal2_open(struct inode *inode, struct file *file)
@@ -743,11 +848,20 @@ static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
	return 4;
 }

-#ifdef CONFIG_SPUFS_MMAP
+#if SPUFS_MMAP_4K
 static struct page *spufs_signal2_mmap_nopage(struct vm_area_struct *vma,
					      unsigned long address, int *type)
 {
-	return spufs_ps_nopage(vma, address, type, 0x1c000);
+#if PAGE_SIZE == 0x1000
+	return spufs_ps_nopage(vma, address, type, 0x1c000, 0x1000);
+#elif PAGE_SIZE == 0x10000
+	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
+	 * signal 1 and 2 area
+	 */
+	return spufs_ps_nopage(vma, address, type, 0x10000, 0x10000);
+#else
+#error unsupported page size
+#endif
 }

 static struct vm_operations_struct spufs_signal2_mmap_vmops = {
@@ -767,15 +881,15 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
 }
-#endif
+#else /* SPUFS_MMAP_4K */
+#define spufs_signal2_mmap NULL
+#endif /* !SPUFS_MMAP_4K */

 static struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
-#ifdef CONFIG_SPUFS_MMAP
	.mmap = spufs_signal2_mmap,
-#endif
 };

 static void spufs_signal1_type_set(void *data, u64 val)
@@ -824,11 +938,11 @@ static u64 spufs_signal2_type_get(void *data)
 DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
					spufs_signal2_type_set, "%llu");

-#ifdef CONFIG_SPUFS_MMAP
+#if SPUFS_MMAP_4K
 static struct page *spufs_mss_mmap_nopage(struct vm_area_struct *vma,
					   unsigned long address, int *type)
 {
-	return spufs_ps_nopage(vma, address, type, 0x0000);
+	return spufs_ps_nopage(vma, address, type, 0x0000, 0x1000);
 }

 static struct vm_operations_struct spufs_mss_mmap_vmops = {
@@ -837,17 +951,12 @@ static struct vm_operations_struct spufs_mss_mmap_vmops = {

 /*
  * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
- * Mapping this area requires that the application have CAP_SYS_RAWIO,
- * as these registers require special care when read/writing.
  */
 static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
 {
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

-	if (!capable(CAP_SYS_RAWIO))
-		return -EPERM;
-
	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
@@ -855,7 +964,9 @@ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
 }
-#endif
+#else /* SPUFS_MMAP_4K */
+#define spufs_mss_mmap NULL
+#endif /* !SPUFS_MMAP_4K */

 static int spufs_mss_open(struct inode *inode, struct file *file)
 {
@@ -867,17 +978,54 @@ static int spufs_mss_open(struct inode *inode, struct file *file)

 static struct file_operations spufs_mss_fops = {
	.open = spufs_mss_open,
-#ifdef CONFIG_SPUFS_MMAP
	.mmap = spufs_mss_mmap,
-#endif
+};
+
+static struct page *spufs_psmap_mmap_nopage(struct vm_area_struct *vma,
					   unsigned long address, int *type)
+{
+	return spufs_ps_nopage(vma, address, type, 0x0000, 0x20000);
+}
+
+static struct vm_operations_struct spufs_psmap_mmap_vmops = {
+	.nopage = spufs_psmap_mmap_nopage,
+};
+
+/*
+ * mmap support for full problem state area [0x00000 - 0x1ffff].
+ */
+static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	if (!(vma->vm_flags & VM_SHARED))
+		return -EINVAL;
+
+	vma->vm_flags |= VM_RESERVED;
+	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
+
+	vma->vm_ops = &spufs_psmap_mmap_vmops;
+	return 0;
+}
+
+static int spufs_psmap_open(struct inode *inode, struct file *file)
+{
+	struct spufs_inode_info *i = SPUFS_I(inode);
+
+	file->private_data = i->i_ctx;
+	return nonseekable_open(inode, file);
+}
+
+static struct file_operations spufs_psmap_fops = {
+	.open = spufs_psmap_open,
+	.mmap = spufs_psmap_mmap,
 };

-#ifdef CONFIG_SPUFS_MMAP
+
+#if SPUFS_MMAP_4K
 static struct page *spufs_mfc_mmap_nopage(struct vm_area_struct *vma,
					   unsigned long address, int *type)
 {
-	return spufs_ps_nopage(vma, address, type, 0x3000);
+	return spufs_ps_nopage(vma, address, type, 0x3000, 0x1000);
 }

 static struct vm_operations_struct spufs_mfc_mmap_vmops = {
@@ -886,17 +1034,12 @@ static struct vm_operations_struct spufs_mfc_mmap_vmops = {

 /*
  * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
- * Mapping this area requires that the application have CAP_SYS_RAWIO,
- * as these registers require special care when read/writing.
  */
 static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
 {
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

-	if (!capable(CAP_SYS_RAWIO))
-		return -EPERM;
-
	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
@@ -904,7 +1047,9 @@ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
 }
-#endif
+#else /* SPUFS_MMAP_4K */
+#define spufs_mfc_mmap NULL
+#endif /* !SPUFS_MMAP_4K */

 static int spufs_mfc_open(struct inode *inode, struct file *file)
 {
@@ -1194,9 +1339,7 @@ static struct file_operations spufs_mfc_fops = {
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
-#ifdef CONFIG_SPUFS_MMAP
	.mmap	 = spufs_mfc_mmap,
-#endif
 };

 static void spufs_npc_set(void *data, u64 val)
@@ -1344,6 +1487,21 @@ static u64 spufs_id_get(void *data)
 }
 DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")

+static u64 spufs_object_id_get(void *data)
+{
+	struct spu_context *ctx = data;
+	return ctx->object_id;
+}
+
+static void spufs_object_id_set(void *data, u64 id)
+{
+	struct spu_context *ctx = data;
+	ctx->object_id = id;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
+		spufs_object_id_set, "0x%llx\n");
+
 struct tree_descr spufs_dir_contents[] = {
	{ "mem",  &spufs_mem_fops, 0666, },
	{ "regs", &spufs_regs_fops,  0666, },
@@ -1367,6 +1525,8 @@ struct tree_descr spufs_dir_contents[] = {
	{ "spu_tag_mask", &spufs_spu_tag_mask_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
+	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
+	{ "object-id", &spufs_object_id_ops, 0666, },
	{},
 };

@@ -0,0 +1,81 @@
+/*
+ * SPU file system
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include "spufs.h"
+
+struct spu_gang *alloc_spu_gang(void)
+{
+	struct spu_gang *gang;
+
+	gang = kzalloc(sizeof *gang, GFP_KERNEL);
+	if (!gang)
+		goto out;
+
+	kref_init(&gang->kref);
+	mutex_init(&gang->mutex);
+	INIT_LIST_HEAD(&gang->list);
+
+out:
+	return gang;
+}
+
+static void destroy_spu_gang(struct kref *kref)
+{
+	struct spu_gang *gang;
+	gang = container_of(kref, struct spu_gang, kref);
+	WARN_ON(gang->contexts || !list_empty(&gang->list));
+	kfree(gang);
+}
+
+struct spu_gang *get_spu_gang(struct spu_gang *gang)
+{
+	kref_get(&gang->kref);
+	return gang;
+}
+
+int put_spu_gang(struct spu_gang *gang)
+{
+	return kref_put(&gang->kref, &destroy_spu_gang);
+}
+
+void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx)
+{
+	mutex_lock(&gang->mutex);
+	ctx->gang = get_spu_gang(gang);
+	list_add(&ctx->gang_list, &gang->list);
+	gang->contexts++;
+	mutex_unlock(&gang->mutex);
+}
+
+void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx)
+{
+	mutex_lock(&gang->mutex);
+	WARN_ON(ctx->gang != gang);
+	list_del_init(&ctx->gang_list);
+	gang->contexts--;
+	mutex_unlock(&gang->mutex);
+
+	put_spu_gang(gang);
+}

@@ -50,6 +50,10 @@ spufs_alloc_inode(struct super_block *sb)
	ei = kmem_cache_alloc(spufs_inode_cache, SLAB_KERNEL);
	if (!ei)
		return NULL;
+
+	ei->i_gang = NULL;
+	ei->i_ctx = NULL;
+
	return &ei->vfs_inode;
 }

@@ -128,14 +132,19 @@ out:
 static void
 spufs_delete_inode(struct inode *inode)
 {
-	if (SPUFS_I(inode)->i_ctx)
-		put_spu_context(SPUFS_I(inode)->i_ctx);
+	struct spufs_inode_info *ei = SPUFS_I(inode);
+
+	if (ei->i_ctx)
+		put_spu_context(ei->i_ctx);
+	if (ei->i_gang)
+		put_spu_gang(ei->i_gang);
	clear_inode(inode);
 }

 static void spufs_prune_dir(struct dentry *dir)
 {
	struct dentry *dentry, *tmp;

	mutex_lock(&dir->d_inode->i_mutex);
	list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
		spin_lock(&dcache_lock);
@@ -156,13 +165,13 @@ static void spufs_prune_dir(struct dentry *dir)
	mutex_unlock(&dir->d_inode->i_mutex);
 }

-/* Caller must hold root->i_mutex */
-static int spufs_rmdir(struct inode *root, struct dentry *dir_dentry)
+/* Caller must hold parent->i_mutex */
+static int spufs_rmdir(struct inode *parent, struct dentry *dir)
 {
	/* remove all entries */
-	spufs_prune_dir(dir_dentry);
+	spufs_prune_dir(dir);

-	return simple_rmdir(root, dir_dentry);
+	return simple_rmdir(parent, dir);
 }

 static int spufs_fill_dir(struct dentry *dir, struct tree_descr *files,
@@ -191,17 +200,17 @@ out:
 static int spufs_dir_close(struct inode *inode, struct file *file)
 {
	struct spu_context *ctx;
-	struct inode *dir;
-	struct dentry *dentry;
+	struct inode *parent;
+	struct dentry *dir;
	int ret;

-	dentry = file->f_dentry;
-	dir = dentry->d_parent->d_inode;
-	ctx = SPUFS_I(dentry->d_inode)->i_ctx;
+	dir = file->f_dentry;
+	parent = dir->d_parent->d_inode;
+	ctx = SPUFS_I(dir->d_inode)->i_ctx;

-	mutex_lock(&dir->i_mutex);
-	ret = spufs_rmdir(dir, dentry);
-	mutex_unlock(&dir->i_mutex);
+	mutex_lock(&parent->i_mutex);
+	ret = spufs_rmdir(parent, dir);
+	mutex_unlock(&parent->i_mutex);
	WARN_ON(ret);

	/* We have to give up the mm_struct */
@@ -224,7 +233,8 @@ struct file_operations spufs_context_fops = {
 };

 static int
-spufs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
+		int mode)
 {
	int ret;
	struct inode *inode;
@@ -239,11 +249,13 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
		inode->i_gid = dir->i_gid;
		inode->i_mode &= S_ISGID;
	}
-	ctx = alloc_spu_context();
+	ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */
	SPUFS_I(inode)->i_ctx = ctx;
	if (!ctx)
		goto out_iput;

+	ctx->flags = flags;
+
	inode->i_op = &spufs_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
@@ -289,24 +301,177 @@ out:
	return ret;
 }

+static int spufs_create_context(struct inode *inode,
+			struct dentry *dentry,
+			struct vfsmount *mnt, int flags, int mode)
+{
+	int ret;
+
+	ret = spufs_mkdir(inode, dentry, flags, mode & S_IRWXUGO);
+	if (ret)
+		goto out_unlock;
+
+	/*
+	 * get references for dget and mntget, will be released
+	 * in error path of *_open().
+	 */
+	ret = spufs_context_open(dget(dentry), mntget(mnt));
+	if (ret < 0) {
+		WARN_ON(spufs_rmdir(inode, dentry));
+		mutex_unlock(&inode->i_mutex);
+		spu_forget(SPUFS_I(dentry->d_inode)->i_ctx);
+		goto out;
+	}
+
+out_unlock:
+	mutex_unlock(&inode->i_mutex);
+out:
+	dput(dentry);
+	return ret;
+}
+
+static int spufs_rmgang(struct inode *root, struct dentry *dir)
+{
+	/* FIXME: this fails if the dir is not empty,
+		  which causes a leak of gangs. */
+	return simple_rmdir(root, dir);
+}
+
+static int spufs_gang_close(struct inode *inode, struct file *file)
+{
+	struct inode *parent;
+	struct dentry *dir;
+	int ret;
+
+	dir = file->f_dentry;
+	parent = dir->d_parent->d_inode;
+
+	ret = spufs_rmgang(parent, dir);
+	WARN_ON(ret);
+
+	return dcache_dir_close(inode, file);
+}
+
+struct file_operations spufs_gang_fops = {
+	.open		= dcache_dir_open,
+	.release	= spufs_gang_close,
+	.llseek		= dcache_dir_lseek,
+	.read		= generic_read_dir,
+	.readdir	= dcache_readdir,
+	.fsync		= simple_sync_file,
+};
+
+static int
+spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
+{
+	int ret;
+	struct inode *inode;
+	struct spu_gang *gang;
+
+	ret = -ENOSPC;
+	inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
+	if (!inode)
+		goto out;
+
+	ret = 0;
+	if (dir->i_mode & S_ISGID) {
+		inode->i_gid = dir->i_gid;
+		inode->i_mode &= S_ISGID;
+	}
+	gang = alloc_spu_gang();
+	SPUFS_I(inode)->i_ctx = NULL;
+	SPUFS_I(inode)->i_gang = gang;
+	if (!gang)
+		goto out_iput;
+
+	inode->i_op = &spufs_dir_inode_operations;
+	inode->i_fop = &simple_dir_operations;
+
+	d_instantiate(dentry, inode);
+	dget(dentry);
+	dir->i_nlink++;
+	dentry->d_inode->i_nlink++;
+	return ret;
+
+out_iput:
+	iput(inode);
+out:
+	return ret;
+}
+
+static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt)
+{
+	int ret;
+	struct file *filp;
+
+	ret = get_unused_fd();
+	if (ret < 0) {
+		dput(dentry);
+		mntput(mnt);
+		goto out;
+	}
+
+	filp = dentry_open(dentry, mnt, O_RDONLY);
+	if (IS_ERR(filp)) {
+		put_unused_fd(ret);
+		ret = PTR_ERR(filp);
+		goto out;
+	}
+
+	filp->f_op = &spufs_gang_fops;
+	fd_install(ret, filp);
+out:
+	return ret;
+}
+
+static int spufs_create_gang(struct inode *inode,
+			struct dentry *dentry,
+			struct vfsmount *mnt, int mode)
+{
+	int ret;
+
+	ret = spufs_mkgang(inode, dentry, mode & S_IRWXUGO);
+	if (ret)
+		goto out;
+
+	/*
+	 * get references for dget and mntget, will be released
+	 * in error path of *_open().
+	 */
+	ret = spufs_gang_open(dget(dentry), mntget(mnt));
+	if (ret < 0)
+		WARN_ON(spufs_rmgang(inode, dentry));
+
+out:
+	mutex_unlock(&inode->i_mutex);
+	dput(dentry);
+	return ret;
+}
+
+
 static struct file_system_type spufs_type;

-long spufs_create_thread(struct nameidata *nd,
-			unsigned int flags, mode_t mode)
+long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode)
 {
	struct dentry *dentry;
	int ret;

-	/* need to be at the root of spufs */
	ret = -EINVAL;
-	if (nd->dentry->d_sb->s_type != &spufs_type ||
-	    nd->dentry != nd->dentry->d_sb->s_root)
+	/* check if we are on spufs */
+	if (nd->dentry->d_sb->s_type != &spufs_type)
		goto out;

-	/* all flags are reserved */
-	if (flags)
+	/* don't accept undefined flags */
+	if (flags & (~SPU_CREATE_FLAG_ALL))
		goto out;

+	/* only threads can be underneath a gang */
+	if (nd->dentry != nd->dentry->d_sb->s_root) {
+		if ((flags & SPU_CREATE_GANG) ||
+		    !SPUFS_I(nd->dentry->d_inode)->i_gang)
+			goto out;
+	}
+
	dentry = lookup_create(nd, 1);
	ret = PTR_ERR(dentry);
	if (IS_ERR(dentry))
@@ -317,22 +482,13 @@ long spufs_create_thread(struct nameidata *nd,
		goto out_dput;

	mode &= ~current->fs->umask;
-	ret = spufs_mkdir(nd->dentry->d_inode, dentry, mode & S_IRWXUGO);
-	if (ret)
-		goto out_dput;

-	/*
-	 * get references for dget and mntget, will be released
-	 * in error path of *_open().
-	 */
-	ret = spufs_context_open(dget(dentry), mntget(nd->mnt));
-	if (ret < 0) {
-		WARN_ON(spufs_rmdir(nd->dentry->d_inode, dentry));
-		mutex_unlock(&nd->dentry->d_inode->i_mutex);
-		spu_forget(SPUFS_I(dentry->d_inode)->i_ctx);
-		dput(dentry);
-		goto out;
-	}
+	if (flags & SPU_CREATE_GANG)
+		return spufs_create_gang(nd->dentry->d_inode,
+					dentry, nd->mnt, mode);
+	else
+		return spufs_create_context(nd->dentry->d_inode,
+					dentry, nd->mnt, flags, mode);

 out_dput:
	dput(dentry);

@@ -14,6 +14,26 @@ void spufs_stop_callback(struct spu *spu)
 	wake_up_all(&ctx->stop_wq);
 }
 
+void spufs_dma_callback(struct spu *spu, int type)
+{
+	struct spu_context *ctx = spu->ctx;
+
+	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
+		ctx->event_return |= type;
+		wake_up_all(&ctx->stop_wq);
+	} else {
+		switch (type) {
+		case SPE_EVENT_DMA_ALIGNMENT:
+		case SPE_EVENT_INVALID_DMA:
+			force_sig(SIGBUS, /* info, */ current);
+			break;
+		case SPE_EVENT_SPE_ERROR:
+			force_sig(SIGILL, /* info */ current);
+			break;
+		}
+	}
+}
+
 static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
 {
 	struct spu *spu;
@@ -28,8 +48,7 @@ static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
 	return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
 }
 
-static inline int spu_run_init(struct spu_context *ctx, u32 * npc,
-			       u32 * status)
+static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 {
 	int ret;
 
@@ -72,7 +91,7 @@ static inline int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
 			SPU_STATUS_STOPPED_BY_HALT)) {
 		return *status;
 	}
-	if ((ret = spu_run_init(ctx, npc, status)) != 0)
+	if ((ret = spu_run_init(ctx, npc)) != 0)
 		return ret;
 	return 0;
 }
@@ -177,46 +196,49 @@ static inline int spu_process_events(struct spu_context *ctx)
 }
 
 long spufs_run_spu(struct file *file, struct spu_context *ctx,
-		   u32 * npc, u32 * status)
+		   u32 *npc, u32 *event)
 {
 	int ret;
+	u32 status;
 
 	if (down_interruptible(&ctx->run_sema))
 		return -ERESTARTSYS;
 
-	ret = spu_run_init(ctx, npc, status);
+	ctx->event_return = 0;
+	ret = spu_run_init(ctx, npc);
 	if (ret)
 		goto out;
 
 	do {
-		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, status));
+		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
 		if (unlikely(ret))
 			break;
-		if ((*status & SPU_STATUS_STOPPED_BY_STOP) &&
-		    (*status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
+		if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
+		    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
 			ret = spu_process_callback(ctx);
 			if (ret)
 				break;
-			*status &= ~SPU_STATUS_STOPPED_BY_STOP;
+			status &= ~SPU_STATUS_STOPPED_BY_STOP;
 		}
 		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
-			ret = spu_reacquire_runnable(ctx, npc, status);
+			ret = spu_reacquire_runnable(ctx, npc, &status);
 			if (ret)
 				goto out;
 			continue;
 		}
 		ret = spu_process_events(ctx);
 
-	} while (!ret && !(*status & (SPU_STATUS_STOPPED_BY_STOP |
+	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
 				      SPU_STATUS_STOPPED_BY_HALT)));
 
 	ctx->ops->runcntl_stop(ctx);
-	ret = spu_run_fini(ctx, npc, status);
+	ret = spu_run_fini(ctx, npc, &status);
 	if (!ret)
-		ret = *status;
+		ret = status;
 	spu_yield(ctx);
 
 out:
+	*event = ctx->event_return;
 	up(&ctx->run_sema);
 	return ret;
 }
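A hypothetical user-space counterpart to the spufs_run_spu() change above (again an illustration, not part of this merge): with SPU_CREATE_EVENTS_ENABLED set, the optional third argument of spu_run receives the accumulated SPE_EVENT_* mask instead of the process being killed by SIGBUS/SIGILL. The syscall numbers are assumed to come from <asm/unistd.h>.

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define SPU_CREATE_EVENTS_ENABLED	0x0001	/* mirrors <asm/spu.h> */

int run_and_report(const char *path)
{
	unsigned int npc = 0, event = 0;
	int fd, status;

	/* assumed: __NR_spu_create and __NR_spu_run are defined */
	fd = syscall(__NR_spu_create, path, SPU_CREATE_EVENTS_ENABLED, 0755);
	if (fd < 0)
		return -1;
	/* loading a program into <path>/mem is omitted here */
	status = syscall(__NR_spu_run, fd, &npc, &event);
	if (event)
		fprintf(stderr, "SPE error events: 0x%x\n", event);
	close(fd);
	return status;
}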
@@ -3,11 +3,7 @@
  * Copyright (C) IBM 2005
  * Author: Mark Nutter <mnutter@us.ibm.com>
  *
- * SPU scheduler, based on Linux thread priority.  For now use
- * a simple "cooperative" yield model with no preemption.  SPU
- * scheduling will eventually be preemptive: When a thread with
- * a higher static priority gets ready to run, then an active SPU
- * context will be preempted and returned to the waitq.
+ * 2006-03-31	NUMA domains added.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -37,6 +33,9 @@
 #include <linux/smp_lock.h>
 #include <linux/stddef.h>
 #include <linux/unistd.h>
+#include <linux/numa.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
 
 #include <asm/io.h>
 #include <asm/mmu_context.h>
@@ -49,128 +48,59 @@
 
 #define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1)
 struct spu_prio_array {
 	atomic_t nr_blocked;
 	unsigned long bitmap[SPU_BITMAP_SIZE];
 	wait_queue_head_t waitq[MAX_PRIO];
+	struct list_head active_list[MAX_NUMNODES];
+	struct mutex active_mutex[MAX_NUMNODES];
 };
 
-/* spu_runqueue - This is the main runqueue data structure for SPUs. */
-struct spu_runqueue {
-	struct semaphore sem;
-	unsigned long nr_active;
-	unsigned long nr_idle;
-	unsigned long nr_switches;
-	struct list_head active_list;
-	struct list_head idle_list;
-	struct spu_prio_array prio;
-};
+static struct spu_prio_array *spu_prio;
 
-static struct spu_runqueue *spu_runqueues = NULL;
-
-static inline struct spu_runqueue *spu_rq(void)
+static inline int node_allowed(int node)
 {
-	/* Future: make this a per-NODE array,
-	 * and use cpu_to_node(smp_processor_id())
-	 */
-	return spu_runqueues;
-}
+	cpumask_t mask;
 
-static inline struct spu *del_idle(struct spu_runqueue *rq)
-{
-	struct spu *spu;
-
-	BUG_ON(rq->nr_idle <= 0);
-	BUG_ON(list_empty(&rq->idle_list));
-	/* Future: Move SPU out of low-power SRI state. */
-	spu = list_entry(rq->idle_list.next, struct spu, sched_list);
-	list_del_init(&spu->sched_list);
-	rq->nr_idle--;
-	return spu;
-}
-
-static inline void del_active(struct spu_runqueue *rq, struct spu *spu)
-{
-	BUG_ON(rq->nr_active <= 0);
-	BUG_ON(list_empty(&rq->active_list));
-	list_del_init(&spu->sched_list);
-	rq->nr_active--;
-}
-
-static inline void add_idle(struct spu_runqueue *rq, struct spu *spu)
-{
-	/* Future: Put SPU into low-power SRI state. */
-	list_add_tail(&spu->sched_list, &rq->idle_list);
-	rq->nr_idle++;
-}
-
-static inline void add_active(struct spu_runqueue *rq, struct spu *spu)
-{
-	rq->nr_active++;
-	rq->nr_switches++;
-	list_add_tail(&spu->sched_list, &rq->active_list);
-}
-
-static void prio_wakeup(struct spu_runqueue *rq)
-{
-	if (atomic_read(&rq->prio.nr_blocked) && rq->nr_idle) {
-		int best = sched_find_first_bit(rq->prio.bitmap);
-		if (best < MAX_PRIO) {
-			wait_queue_head_t *wq = &rq->prio.waitq[best];
-			wake_up_interruptible_nr(wq, 1);
-		}
-	}
-}
-
-static void prio_wait(struct spu_runqueue *rq, struct spu_context *ctx,
-		      u64 flags)
-{
-	int prio = current->prio;
-	wait_queue_head_t *wq = &rq->prio.waitq[prio];
-	DEFINE_WAIT(wait);
-
-	__set_bit(prio, rq->prio.bitmap);
-	atomic_inc(&rq->prio.nr_blocked);
-	prepare_to_wait_exclusive(wq, &wait, TASK_INTERRUPTIBLE);
-	if (!signal_pending(current)) {
-		up(&rq->sem);
-		up_write(&ctx->state_sema);
-		pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
-			 current->pid, current->prio);
-		schedule();
-		down_write(&ctx->state_sema);
-		down(&rq->sem);
-	}
-	finish_wait(wq, &wait);
-	atomic_dec(&rq->prio.nr_blocked);
-	if (!waitqueue_active(wq))
-		__clear_bit(prio, rq->prio.bitmap);
-}
-
-static inline int is_best_prio(struct spu_runqueue *rq)
-{
-	int best_prio;
-
-	best_prio = sched_find_first_bit(rq->prio.bitmap);
-	return (current->prio < best_prio) ? 1 : 0;
+	if (!nr_cpus_node(node))
+		return 0;
+	mask = node_to_cpumask(node);
+	if (!cpus_intersects(mask, current->cpus_allowed))
+		return 0;
+	return 1;
 }
 
 static inline void mm_needs_global_tlbie(struct mm_struct *mm)
 {
+	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;
+
 	/* Global TLBIE broadcast required with SPEs. */
-#if (NR_CPUS > 1)
-	__cpus_setall(&mm->cpu_vm_mask, NR_CPUS);
-#else
-	__cpus_setall(&mm->cpu_vm_mask, NR_CPUS+1); /* is this ok? */
-#endif
+	__cpus_setall(&mm->cpu_vm_mask, nr);
 }
 
+static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);
+
+static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
+{
+	blocking_notifier_call_chain(&spu_switch_notifier,
+			    ctx ? ctx->object_id : 0, spu);
+}
+
+int spu_switch_event_register(struct notifier_block * n)
+{
+	return blocking_notifier_chain_register(&spu_switch_notifier, n);
+}
+
+int spu_switch_event_unregister(struct notifier_block * n)
+{
+	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
+}
+
+
 static inline void bind_context(struct spu *spu, struct spu_context *ctx)
 {
-	pr_debug("%s: pid=%d SPU=%d\n", __FUNCTION__, current->pid,
-		 spu->number);
+	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
+		 spu->number, spu->node);
 	spu->ctx = ctx;
 	spu->flags = 0;
 	ctx->flags = 0;
 	ctx->spu = spu;
 	ctx->ops = &spu_hw_ops;
 	spu->pid = current->pid;
@@ -181,16 +111,20 @@ static inline void bind_context(struct spu *spu, struct spu_context *ctx)
 	spu->wbox_callback = spufs_wbox_callback;
 	spu->stop_callback = spufs_stop_callback;
 	spu->mfc_callback = spufs_mfc_callback;
+	spu->dma_callback = spufs_dma_callback;
 	mb();
 	spu_unmap_mappings(ctx);
 	spu_restore(&ctx->csa, spu);
 	spu->timestamp = jiffies;
+	spu_cpu_affinity_set(spu, raw_smp_processor_id());
+	spu_switch_notify(spu, ctx);
 }
 
 static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
 {
-	pr_debug("%s: unbind pid=%d SPU=%d\n", __FUNCTION__,
-		 spu->pid, spu->number);
+	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
+		 spu->pid, spu->number, spu->node);
+	spu_switch_notify(spu, NULL);
 	spu_unmap_mappings(ctx);
 	spu_save(&ctx->csa, spu);
 	spu->timestamp = jiffies;
@@ -199,173 +133,158 @@ static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
 	spu->wbox_callback = NULL;
 	spu->stop_callback = NULL;
 	spu->mfc_callback = NULL;
+	spu->dma_callback = NULL;
 	spu->mm = NULL;
 	spu->pid = 0;
 	spu->prio = MAX_PRIO;
 	ctx->ops = &spu_backing_ops;
 	ctx->spu = NULL;
 	ctx->flags = 0;
 	spu->flags = 0;
 	spu->ctx = NULL;
 }
 
-static void spu_reaper(void *data)
+static inline void spu_add_wq(wait_queue_head_t * wq, wait_queue_t * wait,
+			      int prio)
 {
-	struct spu_context *ctx = data;
-	struct spu *spu;
-
-	down_write(&ctx->state_sema);
-	spu = ctx->spu;
-	if (spu && test_bit(SPU_CONTEXT_PREEMPT, &ctx->flags)) {
-		if (atomic_read(&spu->rq->prio.nr_blocked)) {
-			pr_debug("%s: spu=%d\n", __func__, spu->number);
-			ctx->ops->runcntl_stop(ctx);
-			spu_deactivate(ctx);
-			wake_up_all(&ctx->stop_wq);
-		} else {
-			clear_bit(SPU_CONTEXT_PREEMPT, &ctx->flags);
-		}
-	}
-	up_write(&ctx->state_sema);
-	put_spu_context(ctx);
+	prepare_to_wait_exclusive(wq, wait, TASK_INTERRUPTIBLE);
+	set_bit(prio, spu_prio->bitmap);
 }
 
-static void schedule_spu_reaper(struct spu_runqueue *rq, struct spu *spu)
+static inline void spu_del_wq(wait_queue_head_t * wq, wait_queue_t * wait,
+			      int prio)
 {
-	struct spu_context *ctx = get_spu_context(spu->ctx);
-	unsigned long now = jiffies;
-	unsigned long expire = spu->timestamp + SPU_MIN_TIMESLICE;
-
-	set_bit(SPU_CONTEXT_PREEMPT, &ctx->flags);
-	INIT_WORK(&ctx->reap_work, spu_reaper, ctx);
-	if (time_after(now, expire))
-		schedule_work(&ctx->reap_work);
-	else
-		schedule_delayed_work(&ctx->reap_work, expire - now);
+	u64 flags;
+
+	__set_current_state(TASK_RUNNING);
+
+	spin_lock_irqsave(&wq->lock, flags);
+
+	remove_wait_queue_locked(wq, wait);
+	if (list_empty(&wq->task_list))
+		clear_bit(prio, spu_prio->bitmap);
+
+	spin_unlock_irqrestore(&wq->lock, flags);
 }
 
-static void check_preempt_active(struct spu_runqueue *rq)
+static void spu_prio_wait(struct spu_context *ctx, u64 flags)
 {
-	struct list_head *p;
-	struct spu *worst = NULL;
-
-	list_for_each(p, &rq->active_list) {
-		struct spu *spu = list_entry(p, struct spu, sched_list);
-		struct spu_context *ctx = spu->ctx;
-		if (!test_bit(SPU_CONTEXT_PREEMPT, &ctx->flags)) {
-			if (!worst || (spu->prio > worst->prio)) {
-				worst = spu;
-			}
-		}
-	}
-	if (worst && (current->prio < worst->prio))
-		schedule_spu_reaper(rq, worst);
+	int prio = current->prio;
+	wait_queue_head_t *wq = &spu_prio->waitq[prio];
+	DEFINE_WAIT(wait);
+
+	if (ctx->spu)
+		return;
+
+	spu_add_wq(wq, &wait, prio);
+
+	if (!signal_pending(current)) {
+		up_write(&ctx->state_sema);
+		pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
+			 current->pid, current->prio);
+		schedule();
+		down_write(&ctx->state_sema);
+	}
+
+	spu_del_wq(wq, &wait, prio);
 }
 
-static struct spu *get_idle_spu(struct spu_context *ctx, u64 flags)
+static void spu_prio_wakeup(void)
 {
-	struct spu_runqueue *rq;
-	struct spu *spu = NULL;
-
-	rq = spu_rq();
-	down(&rq->sem);
-	for (;;) {
-		if (rq->nr_idle > 0) {
-			if (is_best_prio(rq)) {
-				/* Fall through. */
-				spu = del_idle(rq);
-				break;
-			} else {
-				prio_wakeup(rq);
-				up(&rq->sem);
-				yield();
-				if (signal_pending(current)) {
-					return NULL;
-				}
-				rq = spu_rq();
-				down(&rq->sem);
-				continue;
-			}
-		} else {
-			check_preempt_active(rq);
-			prio_wait(rq, ctx, flags);
-			if (signal_pending(current)) {
-				prio_wakeup(rq);
-				spu = NULL;
-				break;
-			}
-			continue;
-		}
-	}
-	up(&rq->sem);
-	return spu;
-}
-
-static void put_idle_spu(struct spu *spu)
-{
-	struct spu_runqueue *rq = spu->rq;
-
-	down(&rq->sem);
-	add_idle(rq, spu);
-	prio_wakeup(rq);
-	up(&rq->sem);
+	int best = sched_find_first_bit(spu_prio->bitmap);
+	if (best < MAX_PRIO) {
+		wait_queue_head_t *wq = &spu_prio->waitq[best];
+		wake_up_interruptible_nr(wq, 1);
+	}
 }
 
 static int get_active_spu(struct spu *spu)
 {
-	struct spu_runqueue *rq = spu->rq;
-	struct list_head *p;
+	int node = spu->node;
 	struct spu *tmp;
 	int rc = 0;
 
-	down(&rq->sem);
-	list_for_each(p, &rq->active_list) {
-		tmp = list_entry(p, struct spu, sched_list);
+	mutex_lock(&spu_prio->active_mutex[node]);
+	list_for_each_entry(tmp, &spu_prio->active_list[node], list) {
 		if (tmp == spu) {
-			del_active(rq, spu);
+			list_del_init(&spu->list);
 			rc = 1;
 			break;
 		}
 	}
-	up(&rq->sem);
+	mutex_unlock(&spu_prio->active_mutex[node]);
 	return rc;
 }
 
 static void put_active_spu(struct spu *spu)
 {
-	struct spu_runqueue *rq = spu->rq;
+	int node = spu->node;
 
-	down(&rq->sem);
-	add_active(rq, spu);
-	up(&rq->sem);
+	mutex_lock(&spu_prio->active_mutex[node]);
+	list_add_tail(&spu->list, &spu_prio->active_list[node]);
+	mutex_unlock(&spu_prio->active_mutex[node]);
 }
 
-/* Lock order:
- *	spu_activate() & spu_deactivate() require the
- *	caller to have down_write(&ctx->state_sema).
- */
+static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags)
+{
+	struct spu *spu = NULL;
+	int node = cpu_to_node(raw_smp_processor_id());
+	int n;
+
+	for (n = 0; n < MAX_NUMNODES; n++, node++) {
+		node = (node < MAX_NUMNODES) ? node : 0;
+		if (!node_allowed(node))
+			continue;
+		spu = spu_alloc_node(node);
+		if (spu)
+			break;
+	}
+	return spu;
+}
+
+static inline struct spu *spu_get(struct spu_context *ctx, u64 flags)
+{
+	/* Future: spu_get_idle() if possible,
+	 * otherwise try to preempt an active
+	 * context.
+	 */
+	return spu_get_idle(ctx, flags);
+}
 
 /* The three externally callable interfaces
  * for the scheduler begin here.
  *
- * The rq->sem is briefly held (inside or outside a
- * given ctx lock) for list management, but is never
- * held during save/restore.
+ *	spu_activate	- bind a context to SPU, waiting as needed.
+ *	spu_deactivate	- unbind a context from its SPU.
+ *	spu_yield	- yield an SPU if others are waiting.
  */
 
 int spu_activate(struct spu_context *ctx, u64 flags)
 {
 	struct spu *spu;
+	int ret = 0;
 
-	if (ctx->spu)
-		return 0;
-	spu = get_idle_spu(ctx, flags);
-	if (!spu)
-		return (signal_pending(current)) ? -ERESTARTSYS : -EAGAIN;
-	bind_context(spu, ctx);
-	/*
-	 * We're likely to wait for interrupts on the same
-	 * CPU that we are now on, so send them here.
-	 */
-	spu_cpu_affinity_set(spu, raw_smp_processor_id());
-	put_active_spu(spu);
-	return 0;
+	for (;;) {
+		if (ctx->spu)
+			return 0;
+		spu = spu_get(ctx, flags);
+		if (spu != NULL) {
+			if (ctx->spu != NULL) {
+				spu_free(spu);
+				spu_prio_wakeup();
+				break;
+			}
+			bind_context(spu, ctx);
+			put_active_spu(spu);
+			break;
+		}
+		spu_prio_wait(ctx, flags);
+		if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			spu_prio_wakeup();
+			break;
+		}
+	}
+	return ret;
 }
 
 void spu_deactivate(struct spu_context *ctx)
@@ -378,8 +297,10 @@ void spu_deactivate(struct spu_context *ctx)
 		return;
 	needs_idle = get_active_spu(spu);
 	unbind_context(spu, ctx);
-	if (needs_idle)
-		put_idle_spu(spu);
+	if (needs_idle) {
+		spu_free(spu);
+		spu_prio_wakeup();
+	}
 }
 
 void spu_yield(struct spu_context *ctx)
@@ -387,77 +308,60 @@ void spu_yield(struct spu_context *ctx)
 	struct spu *spu;
 	int need_yield = 0;
 
-	down_write(&ctx->state_sema);
-	spu = ctx->spu;
-	if (spu && (sched_find_first_bit(spu->rq->prio.bitmap) < MAX_PRIO)) {
-		pr_debug("%s: yielding SPU %d\n", __FUNCTION__, spu->number);
-		spu_deactivate(ctx);
-		ctx->state = SPU_STATE_SAVED;
-		need_yield = 1;
-	} else if (spu) {
-		spu->prio = MAX_PRIO;
+	if (down_write_trylock(&ctx->state_sema)) {
+		if ((spu = ctx->spu) != NULL) {
+			int best = sched_find_first_bit(spu_prio->bitmap);
+			if (best < MAX_PRIO) {
+				pr_debug("%s: yielding SPU %d NODE %d\n",
+					 __FUNCTION__, spu->number, spu->node);
+				spu_deactivate(ctx);
+				ctx->state = SPU_STATE_SAVED;
+				need_yield = 1;
+			} else {
+				spu->prio = MAX_PRIO;
+			}
+		}
+		up_write(&ctx->state_sema);
 	}
-	up_write(&ctx->state_sema);
 	if (unlikely(need_yield))
 		yield();
 }
 
 int __init spu_sched_init(void)
 {
-	struct spu_runqueue *rq;
-	struct spu *spu;
 	int i;
 
-	rq = spu_runqueues = kmalloc(sizeof(struct spu_runqueue), GFP_KERNEL);
-	if (!rq) {
-		printk(KERN_WARNING "%s: Unable to allocate runqueues.\n",
+	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
+	if (!spu_prio) {
+		printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
 		       __FUNCTION__);
 		return 1;
 	}
-	memset(rq, 0, sizeof(struct spu_runqueue));
-	init_MUTEX(&rq->sem);
-	INIT_LIST_HEAD(&rq->active_list);
-	INIT_LIST_HEAD(&rq->idle_list);
-	rq->nr_active = 0;
-	rq->nr_idle = 0;
-	rq->nr_switches = 0;
-	atomic_set(&rq->prio.nr_blocked, 0);
 	for (i = 0; i < MAX_PRIO; i++) {
-		init_waitqueue_head(&rq->prio.waitq[i]);
-		__clear_bit(i, rq->prio.bitmap);
+		init_waitqueue_head(&spu_prio->waitq[i]);
+		__clear_bit(i, spu_prio->bitmap);
 	}
-	__set_bit(MAX_PRIO, rq->prio.bitmap);
-	for (;;) {
-		spu = spu_alloc();
-		if (!spu)
-			break;
-		pr_debug("%s: adding SPU[%d]\n", __FUNCTION__, spu->number);
-		add_idle(rq, spu);
-		spu->rq = rq;
-		spu->timestamp = jiffies;
-	}
-	if (!rq->nr_idle) {
-		printk(KERN_WARNING "%s: No available SPUs.\n", __FUNCTION__);
-		kfree(rq);
-		return 1;
+	__set_bit(MAX_PRIO, spu_prio->bitmap);
+	for (i = 0; i < MAX_NUMNODES; i++) {
+		mutex_init(&spu_prio->active_mutex[i]);
+		INIT_LIST_HEAD(&spu_prio->active_list[i]);
 	}
 	return 0;
 }
 
 void __exit spu_sched_exit(void)
 {
-	struct spu_runqueue *rq = spu_rq();
-	struct spu *spu;
+	struct spu *spu, *tmp;
+	int node;
 
-	if (!rq) {
-		printk(KERN_WARNING "%s: no runqueues!\n", __FUNCTION__);
-		return;
-	}
-	while (rq->nr_idle > 0) {
-		spu = del_idle(rq);
-		if (!spu)
-			break;
-		spu_free(spu);
+	for (node = 0; node < MAX_NUMNODES; node++) {
+		mutex_lock(&spu_prio->active_mutex[node]);
+		list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
					 list) {
+			list_del_init(&spu->list);
+			spu_free(spu);
+		}
+		mutex_unlock(&spu_prio->active_mutex[node]);
 	}
-	kfree(rq);
+	kfree(spu_prio);
 }
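To make the three scheduler entry points above concrete, here is a hypothetical caller sequence, distilled from how the spufs run loop drives them; the state_sema requirement is carried over from the old lock-order comment, and the function itself is an illustration, not code from this merge.

/* Hypothetical caller, for illustration only. */
static int run_once(struct spu_context *ctx)
{
	int ret;

	down_write(&ctx->state_sema);
	ret = spu_activate(ctx, 0);	/* NUMA-aware bind, may sleep */
	up_write(&ctx->state_sema);
	if (ret)
		return ret;

	/* ... start the SPU and wait on ctx->stop_wq ... */

	spu_yield(ctx);			/* release the SPU if others wait */
	return 0;
}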
@@ -39,6 +39,8 @@ struct spu_context_ops;
 
 #define SPU_CONTEXT_PREEMPT          0UL
 
+struct spu_gang;
+
 struct spu_context {
 	struct spu *spu;		  /* pointer to a physical SPU */
 	struct spu_state csa;		  /* SPU context save area. */
@@ -48,6 +50,7 @@ struct spu_context {
 	struct address_space *cntl;	   /* 'control' area mappings. */
 	struct address_space *signal1;	   /* 'signal1' area mappings. */
 	struct address_space *signal2;	   /* 'signal2' area mappings. */
+	u64 object_id;			   /* user space pointer for oprofile */
 
 	enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
 	struct rw_semaphore state_sema;
@@ -66,7 +69,18 @@ struct spu_context {
 	u32 tagwait;
 	struct spu_context_ops *ops;
 	struct work_struct reap_work;
-	u64 flags;
+	unsigned long flags;
+	unsigned long event_return;
+
+	struct list_head gang_list;
+	struct spu_gang *gang;
+};
+
+struct spu_gang {
+	struct list_head list;
+	struct mutex mutex;
+	struct kref kref;
+	int contexts;
 };
 
 struct mfc_dma_command {
@@ -114,6 +128,7 @@ extern struct spu_context_ops spu_backing_ops;
 
 struct spufs_inode_info {
 	struct spu_context *i_ctx;
+	struct spu_gang *i_gang;
 	struct inode vfs_inode;
 };
 #define SPUFS_I(inode) \
@@ -124,12 +139,19 @@ extern struct tree_descr spufs_dir_contents[];
 /* system call implementation */
 long spufs_run_spu(struct file *file,
 		   struct spu_context *ctx, u32 *npc, u32 *status);
-long spufs_create_thread(struct nameidata *nd,
+long spufs_create(struct nameidata *nd,
 			 unsigned int flags, mode_t mode);
 extern struct file_operations spufs_context_fops;
 
+/* gang management */
+struct spu_gang *alloc_spu_gang(void);
+struct spu_gang *get_spu_gang(struct spu_gang *gang);
+int put_spu_gang(struct spu_gang *gang);
+void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx);
+void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);
+
 /* context management */
-struct spu_context * alloc_spu_context(void);
+struct spu_context * alloc_spu_context(struct spu_gang *gang);
 void destroy_spu_context(struct kref *kref);
 struct spu_context * get_spu_context(struct spu_context *ctx);
 int put_spu_context(struct spu_context *ctx);
@@ -183,5 +205,6 @@ void spufs_ibox_callback(struct spu *spu);
 void spufs_wbox_callback(struct spu *spu);
 void spufs_stop_callback(struct spu *spu);
 void spufs_mfc_callback(struct spu *spu);
+void spufs_dma_callback(struct spu *spu, int type);
 
 #endif
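The gang bookkeeping helpers declared above are implemented elsewhere in this merge (the gang management file itself is not shown); a plausible sketch of spu_gang_add_ctx, assuming the obvious list/kref discipline over the fields defined in struct spu_gang and struct spu_context:

/* Hypothetical sketch only; the real implementation is not shown here. */
static void sketch_spu_gang_add_ctx(struct spu_gang *gang,
				    struct spu_context *ctx)
{
	mutex_lock(&gang->mutex);
	ctx->gang = get_spu_gang(gang);	/* context holds a gang reference */
	list_add(&ctx->gang_list, &gang->list);
	gang->contexts++;
	mutex_unlock(&gang->mutex);
}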
@@ -1779,6 +1779,15 @@ static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
 	 */
 	out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
 	eieio();
+	/*
+	 * FIXME: this is to restart a DMA that we were processing
+	 *        before the save. better remember the fault information
+	 *        in the csa instead.
+	 */
+	if ((csa->priv2.mfc_control_RW & MFC_CNTL_SUSPEND_DMA_QUEUE_MASK)) {
+		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
+		eieio();
+	}
 }
 
 static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
@@ -38,7 +38,7 @@ static long do_spu_run(struct file *filp,
 	u32 npc, status;
 
 	ret = -EFAULT;
-	if (get_user(npc, unpc) || get_user(status, ustatus))
+	if (get_user(npc, unpc))
 		goto out;
 
 	/* check if this file was created by spu_create */
@@ -49,7 +49,10 @@ static long do_spu_run(struct file *filp,
 	i = SPUFS_I(filp->f_dentry->d_inode);
 	ret = spufs_run_spu(filp, i->i_ctx, &npc, &status);
 
-	if (put_user(npc, unpc) || put_user(status, ustatus))
+	if (put_user(npc, unpc))
+		ret = -EFAULT;
+
+	if (ustatus && put_user(status, ustatus))
 		ret = -EFAULT;
 out:
 	return ret;
@@ -87,7 +90,7 @@ asmlinkage long sys_spu_create(const char __user *pathname,
 	ret = path_lookup(tmp, LOOKUP_PARENT|
 			LOOKUP_OPEN|LOOKUP_CREATE, &nd);
 	if (!ret) {
-		ret = spufs_create_thread(&nd, flags, mode);
+		ret = spufs_create(&nd, flags, mode);
 		path_release(&nd);
 	}
 	putname(tmp);
@@ -138,6 +138,7 @@ struct spu {
 	void (* ibox_callback)(struct spu *spu);
 	void (* stop_callback)(struct spu *spu);
 	void (* mfc_callback)(struct spu *spu);
+	void (* dma_callback)(struct spu *spu, int type);
 
 	char irq_c0[8];
 	char irq_c1[8];
@@ -147,6 +148,7 @@ struct spu {
 };
 
 struct spu *spu_alloc(void);
+struct spu *spu_alloc_node(int node);
 void spu_free(struct spu *spu);
 int spu_irq_class_0_bottom(struct spu *spu);
 int spu_irq_class_1_bottom(struct spu *spu);
@@ -168,6 +170,22 @@ extern struct spufs_calls {
 	struct module *owner;
 } spufs_calls;
 
+/* return status from spu_run, same as in libspe */
+#define SPE_EVENT_DMA_ALIGNMENT		0x0008	/* A DMA alignment error */
+#define SPE_EVENT_SPE_ERROR		0x0010	/* An illegal instruction error */
+#define SPE_EVENT_SPE_DATA_SEGMENT	0x0020	/* A DMA segmentation error */
+#define SPE_EVENT_SPE_DATA_STORAGE	0x0040	/* A DMA storage error */
+#define SPE_EVENT_INVALID_DMA		0x0800	/* Invalid MFC DMA */
+
+/*
+ * Flags for sys_spu_create.
+ */
+#define SPU_CREATE_EVENTS_ENABLED	0x0001
+#define SPU_CREATE_GANG			0x0002
+
+#define SPU_CREATE_FLAG_ALL		0x0003 /* mask of all valid flags */
+
+
 #ifdef CONFIG_SPU_FS_MODULE
 int register_spu_syscalls(struct spufs_calls *calls);
 void unregister_spu_syscalls(struct spufs_calls *calls);
@@ -182,6 +200,24 @@ static inline void unregister_spu_syscalls(struct spufs_calls *calls)
 #endif /* MODULE */
 
 
+/*
+ * Notifier blocks:
+ *
+ * oprofile can get notified when a context switch is performed
+ * on an spe. The notifier function that gets called is passed
+ * a pointer to the SPU structure as well as the object-id that
+ * identifies the binary running on that SPU now.
+ *
+ * For a context save, the object-id that is passed is zero,
+ * identifying that the kernel will run from that moment on.
+ *
+ * For a context restore, the object-id is the value written
+ * to the object-id spufs file from user space and the notifier
+ * function can assume that spu->ctx is valid.
+ */
+int spu_switch_event_register(struct notifier_block * n);
+int spu_switch_event_unregister(struct notifier_block * n);
+
 /*
  * This defines the Local Store, Problem Area and Privilege Area of an SPU.
  */
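Finally, a hypothetical consumer of the notifier interface declared above (illustration only; oprofile's actual hook is not part of this merge). The chain is invoked with the object-id as the action value and the struct spu as the data pointer.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/spu.h>

/* called with the object-id as 'val' and the struct spu as 'data' */
static int my_spu_switch_event(struct notifier_block *nb,
			       unsigned long val, void *data)
{
	struct spu *spu = data;

	if (val)
		pr_debug("SPU %d now runs object-id %#lx\n", spu->number, val);
	else
		pr_debug("SPU %d saved, kernel running\n", spu->number);
	return NOTIFY_OK;
}

static struct notifier_block my_spu_switch_nb = {
	.notifier_call = my_spu_switch_event,
};

static int __init my_init(void)
{
	return spu_switch_event_register(&my_spu_switch_nb);
}

static void __exit my_exit(void)
{
	spu_switch_event_unregister(&my_spu_switch_nb);
}

module_init(my_init);
module_exit(my_exit);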