Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc:
  [POWERPC] Fix building of COFF zImages
  [POWERPC] spufs: Fix error handling in spufs_fill_dir()
  [POWERPC] Add table of contents to booting-without-of.txt
  [POWERPC] spufs: Don't yield nosched context
  [POWERPC] Fix typo in booting-without-of-txt section numbering
  [POWERPC] scc_sio: Fix link failure
  [POWERPC] cbe_cpufreq: Limit frequency via cpufreq notifier chain
  [POWERPC] Fix pci_setup_phb_io_dynamic for pci_iomap
  [POWERPC] spufs scheduler: Fix wakeup races
  [POWERPC] spufs: Synchronize pte invalidation vs ps close
  [POWERPC] spufs: Free mm if spufs_fill_dir() failed
  [POWERPC] spufs: Fix gang destroy leaks
  [POWERPC] spufs: Hook up spufs_release_mem
  [POWERPC] spufs: Refuse to load the module when not running on cell
  [POWERPC] pasemi: Fix iommu + 64K PAGE_SIZE bug
commit 143a275984
@@ -1,7 +1,6 @@
            Booting the Linux/ppc kernel without Open Firmware
            --------------------------------------------------
 
-
 (c) 2005 Benjamin Herrenschmidt <benh at kernel.crashing.org>,
     IBM Corp.
 (c) 2005 Becky Bruce <becky.bruce at freescale.com>,
@@ -9,6 +8,62 @@
 (c) 2006 MontaVista Software, Inc.
     Flash chip node definition
 
+Table of Contents
+=================
+
+  I - Introduction
+    1) Entry point for arch/powerpc
+    2) Board support
+
+  II - The DT block format
+    1) Header
+    2) Device tree generalities
+    3) Device tree "structure" block
+    4) Device tree "strings" block
+
+  III - Required content of the device tree
+    1) Note about cells and address representation
+    2) Note about "compatible" properties
+    3) Note about "name" properties
+    4) Note about node and property names and character set
+    5) Required nodes and properties
+      a) The root node
+      b) The /cpus node
+      c) The /cpus/* nodes
+      d) the /memory node(s)
+      e) The /chosen node
+      f) the /soc<SOCname> node
+
+  IV - "dtc", the device tree compiler
+
+  V - Recommendations for a bootloader
+
+  VI - System-on-a-chip devices and nodes
+    1) Defining child nodes of an SOC
+    2) Representing devices without a current OF specification
+      a) MDIO IO device
+      c) PHY nodes
+      b) Gianfar-compatible ethernet nodes
+      d) Interrupt controllers
+      e) I2C
+      f) Freescale SOC USB controllers
+      g) Freescale SOC SEC Security Engines
+      h) Board Control and Status (BCSR)
+      i) Freescale QUICC Engine module (QE)
+      g) Flash chip nodes
+
+  VII - Specifying interrupt information for devices
+    1) interrupts property
+    2) interrupt-parent property
+    3) OpenPIC Interrupt Controllers
+    4) ISA Interrupt Controllers
+
+  Appendix A - Sample SOC node for MPC8540
+
+
+Revision Information
+====================
+
 May 18, 2005: Rev 0.1 - Initial draft, no chapter III yet.
 
 May 19, 2005: Rev 0.2 - Add chapter III and bits & pieces here or
@@ -1687,7 +1742,7 @@ platforms are moved over to use the flattened-device-tree model.
 		};
 	};
 
-   g) Flash chip nodes
+   j) Flash chip nodes
 
   Flash chips (Memory Technology Devices) are often used for solid state
   file systems on embedded devices.

@@ -13,6 +13,7 @@
 
+	.text
 	/* a procedure descriptor used when booting this as a COFF file */
 	.globl	_zimage_start_opd
 _zimage_start_opd:
 	.long	_zimage_start, 0, 0, 0
 

@@ -433,7 +433,7 @@ static int __devinit of_pci_phb_probe(struct of_device *dev,
 	 * Note also that we don't do ISA, this will also be fixed with a
 	 * more massive rework.
 	 */
-	pci_setup_phb_io(phb, 0);
+	pci_setup_phb_io(phb, pci_io_base == 0);
 
 	/* Init pci_dn data structures */
 	pci_devs_phb_init_dynamic(phb);

@@ -67,6 +67,7 @@ static u64 MIC_Slow_Next_Timer_table[] = {
 	0x00003FC000000000ull,
 };
 
+static unsigned int pmi_frequency_limit = 0;
 /*
  * hardware specific functions
  */
@@ -164,7 +165,6 @@ static int set_pmode(int cpu, unsigned int slow_mode) {
 
 static void cbe_cpufreq_handle_pmi(struct of_device *dev, pmi_message_t pmi_msg)
 {
-	struct cpufreq_policy policy;
 	u8 cpu;
 	u8 cbe_pmode_new;
 
@@ -173,15 +173,27 @@ static void cbe_cpufreq_handle_pmi(struct of_device *dev, pmi_message_t pmi_msg)
 	cpu = cbe_node_to_cpu(pmi_msg.data1);
 	cbe_pmode_new = pmi_msg.data2;
 
-	cpufreq_get_policy(&policy, cpu);
+	pmi_frequency_limit = cbe_freqs[cbe_pmode_new].frequency;
 
-	policy.max = min(policy.max, cbe_freqs[cbe_pmode_new].frequency);
-	policy.min = min(policy.min, policy.max);
-
-	pr_debug("cbe_handle_pmi: new policy.min=%d policy.max=%d\n", policy.min, policy.max);
-	cpufreq_set_policy(&policy);
+	pr_debug("cbe_handle_pmi: max freq=%d\n", pmi_frequency_limit);
 }
 
+static int pmi_notifier(struct notifier_block *nb,
+			unsigned long event, void *data)
+{
+	struct cpufreq_policy *policy = data;
+
+	if (event != CPUFREQ_INCOMPATIBLE)
+		return 0;
+
+	cpufreq_verify_within_limits(policy, 0, pmi_frequency_limit);
+	return 0;
+}
+
+static struct notifier_block pmi_notifier_block = {
+	.notifier_call = pmi_notifier,
+};
+
 static struct pmi_handler cbe_pmi_handler = {
 	.type			= PMI_TYPE_FREQ_CHANGE,
 	.handle_pmi_message	= cbe_cpufreq_handle_pmi,
@@ -238,12 +250,21 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 	cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
 
+	if (pmi_dev) {
+		/* frequency might get limited later, initialize limit with max_freq */
+		pmi_frequency_limit = max_freq;
+		cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
+	}
+
 	/* this ensures that policy->cpuinfo_min and policy->cpuinfo_max are set correctly */
 	return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs);
 }
 
 static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
+	if (pmi_dev)
+		cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
+
 	cpufreq_frequency_table_put_attr(policy->cpu);
 	return 0;
 }

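The cbe_cpufreq hunks above move the frequency cap out of the PMI handler and into the cpufreq policy notifier chain. Below is a minimal sketch of that notifier pattern against the same 2007-era cpufreq API; the names external_limit_khz, limit_notifier and register_limit are illustrative, not part of the patch.

	#include <linux/cpufreq.h>
	#include <linux/notifier.h>

	static unsigned int external_limit_khz;	/* updated from firmware (PMI) events */

	/* clamp every proposed policy against the externally imposed limit */
	static int limit_notifier(struct notifier_block *nb,
				  unsigned long event, void *data)
	{
		struct cpufreq_policy *policy = data;

		if (event != CPUFREQ_INCOMPATIBLE)
			return 0;

		cpufreq_verify_within_limits(policy, 0, external_limit_khz);
		return 0;
	}

	static struct notifier_block limit_nb = {
		.notifier_call = limit_notifier,
	};

	/* registered once from the driver's init hook, matching the
	 * cbe_cpufreq_cpu_init() hunk above */
	static int register_limit(void)
	{
		return cpufreq_register_notifier(&limit_nb, CPUFREQ_POLICY_NOTIFIER);
	}
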
@@ -39,7 +39,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
 	if (spu_init_csa(&ctx->csa))
 		goto out_free;
 	spin_lock_init(&ctx->mmio_lock);
-	spin_lock_init(&ctx->mapping_lock);
+	mutex_init(&ctx->mapping_lock);
 	kref_init(&ctx->kref);
 	mutex_init(&ctx->state_mutex);
 	mutex_init(&ctx->run_mutex);
@@ -103,6 +103,7 @@ void spu_forget(struct spu_context *ctx)
 
 void spu_unmap_mappings(struct spu_context *ctx)
 {
+	mutex_lock(&ctx->mapping_lock);
 	if (ctx->local_store)
 		unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
 	if (ctx->mfc)
@@ -117,6 +118,7 @@ void spu_unmap_mappings(struct spu_context *ctx)
 		unmap_mapping_range(ctx->mss, 0, 0x1000, 1);
 	if (ctx->psmap)
 		unmap_mapping_range(ctx->psmap, 0, 0x20000, 1);
+	mutex_unlock(&ctx->mapping_lock);
 }
 
 /**

@@ -45,11 +45,11 @@ spufs_mem_open(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
 	if (!i->i_openers++)
 		ctx->local_store = inode->i_mapping;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
@@ -59,10 +59,10 @@ spufs_mem_release(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!--i->i_openers)
 		ctx->local_store = NULL;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
@@ -217,6 +217,7 @@ unsigned long spufs_get_unmapped_area(struct file *file, unsigned long addr,
 
 static const struct file_operations spufs_mem_fops = {
 	.open			= spufs_mem_open,
+	.release		= spufs_mem_release,
 	.read			= spufs_mem_read,
 	.write			= spufs_mem_write,
 	.llseek			= generic_file_llseek,
@@ -309,11 +310,11 @@ static int spufs_cntl_open(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
 	if (!i->i_openers++)
 		ctx->cntl = inode->i_mapping;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return simple_attr_open(inode, file, spufs_cntl_get,
 					spufs_cntl_set, "0x%08lx");
 }
@@ -326,10 +327,10 @@ spufs_cntl_release(struct inode *inode, struct file *file)
 
 	simple_attr_close(inode, file);
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!--i->i_openers)
 		ctx->cntl = NULL;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
@@ -812,11 +813,11 @@ static int spufs_signal1_open(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
 	if (!i->i_openers++)
 		ctx->signal1 = inode->i_mapping;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
@@ -826,10 +827,10 @@ spufs_signal1_release(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!--i->i_openers)
 		ctx->signal1 = NULL;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
@@ -936,11 +937,11 @@ static int spufs_signal2_open(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
 	if (!i->i_openers++)
 		ctx->signal2 = inode->i_mapping;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
@@ -950,10 +951,10 @@ spufs_signal2_release(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!--i->i_openers)
 		ctx->signal2 = NULL;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
@@ -1154,10 +1155,10 @@ static int spufs_mss_open(struct inode *inode, struct file *file)
 
 	file->private_data = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!i->i_openers++)
 		ctx->mss = inode->i_mapping;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
@@ -1167,10 +1168,10 @@ spufs_mss_release(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!--i->i_openers)
 		ctx->mss = NULL;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
@@ -1211,11 +1212,11 @@ static int spufs_psmap_open(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	file->private_data = i->i_ctx;
 	if (!i->i_openers++)
 		ctx->psmap = inode->i_mapping;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
@@ -1225,10 +1226,10 @@ spufs_psmap_release(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!--i->i_openers)
 		ctx->psmap = NULL;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
 
@@ -1281,11 +1282,11 @@ static int spufs_mfc_open(struct inode *inode, struct file *file)
 	if (atomic_read(&inode->i_count) != 1)
 		return -EBUSY;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	file->private_data = ctx;
 	if (!i->i_openers++)
 		ctx->mfc = inode->i_mapping;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return nonseekable_open(inode, file);
 }
 
@@ -1295,10 +1296,10 @@ spufs_mfc_release(struct inode *inode, struct file *file)
 	struct spufs_inode_info *i = SPUFS_I(inode);
 	struct spu_context *ctx = i->i_ctx;
 
-	spin_lock(&ctx->mapping_lock);
+	mutex_lock(&ctx->mapping_lock);
 	if (!--i->i_openers)
 		ctx->mfc = NULL;
-	spin_unlock(&ctx->mapping_lock);
+	mutex_unlock(&ctx->mapping_lock);
 	return 0;
 }
 

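All of the open/release pairs above share one pattern: i_openers counts open file references under mapping_lock, the first opener publishes inode->i_mapping into the context, and the last closer clears it so spu_unmap_mappings() only ever touches live mappings. A condensed sketch of that pattern follows; struct example_info, struct example_ctx and the example_open/example_release names are illustrative, not from the patch.

	#include <linux/fs.h>
	#include <linux/mutex.h>

	struct example_ctx {
		struct address_space *mapping;	/* non-NULL while someone has it open */
		struct mutex mapping_lock;	/* guards the pointer and the count */
	};

	struct example_info {
		int i_openers;			/* open file references */
		struct example_ctx *i_ctx;
	};

	static int example_open(struct inode *inode, struct file *file,
				struct example_info *i)
	{
		struct example_ctx *ctx = i->i_ctx;

		mutex_lock(&ctx->mapping_lock);
		file->private_data = ctx;
		if (!i->i_openers++)		/* first opener publishes the mapping */
			ctx->mapping = inode->i_mapping;
		mutex_unlock(&ctx->mapping_lock);
		return 0;
	}

	static int example_release(struct inode *inode, struct file *file,
				   struct example_info *i)
	{
		struct example_ctx *ctx = i->i_ctx;

		mutex_lock(&ctx->mapping_lock);
		if (!--i->i_openers)		/* last closer hides it again */
			ctx->mapping = NULL;
		mutex_unlock(&ctx->mapping_lock);
		return 0;
	}
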
@@ -177,7 +177,7 @@ static int spufs_rmdir(struct inode *parent, struct dentry *dir)
 static int spufs_fill_dir(struct dentry *dir, struct tree_descr *files,
 			  int mode, struct spu_context *ctx)
 {
-	struct dentry *dentry;
+	struct dentry *dentry, *tmp;
 	int ret;
 
 	while (files->name && files->name[0]) {
@@ -193,7 +193,20 @@ static int spufs_fill_dir(struct dentry *dir, struct tree_descr *files,
 	}
 	return 0;
 out:
-	spufs_prune_dir(dir);
+	/*
+	 * remove all children from dir. dir->inode is not set so don't
+	 * just simply use spufs_prune_dir() and panic afterwards :)
+	 * dput() looks like it will do the right thing:
+	 * - dec parent's ref counter
+	 * - remove child from parent's child list
+	 * - free child's inode if possible
+	 * - free child
+	 */
+	list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
+		dput(dentry);
+	}
+
+	shrink_dcache_parent(dir);
 	return ret;
 }
 
@@ -274,6 +287,7 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
 	goto out;
 
 out_free_ctx:
+	spu_forget(ctx);
 	put_spu_context(ctx);
 out_iput:
 	iput(inode);
@@ -349,37 +363,6 @@ out:
 	return ret;
 }
 
-static int spufs_rmgang(struct inode *root, struct dentry *dir)
-{
-	/* FIXME: this fails if the dir is not empty,
-	   which causes a leak of gangs. */
-	return simple_rmdir(root, dir);
-}
-
-static int spufs_gang_close(struct inode *inode, struct file *file)
-{
-	struct inode *parent;
-	struct dentry *dir;
-	int ret;
-
-	dir = file->f_path.dentry;
-	parent = dir->d_parent->d_inode;
-
-	ret = spufs_rmgang(parent, dir);
-	WARN_ON(ret);
-
-	return dcache_dir_close(inode, file);
-}
-
-const struct file_operations spufs_gang_fops = {
-	.open		= dcache_dir_open,
-	.release	= spufs_gang_close,
-	.llseek		= dcache_dir_lseek,
-	.read		= generic_read_dir,
-	.readdir	= dcache_readdir,
-	.fsync		= simple_sync_file,
-};
-
 static int
 spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
 {
@@ -407,7 +390,6 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
 	inode->i_fop = &simple_dir_operations;
 
 	d_instantiate(dentry, inode);
-	dget(dentry);
 	dir->i_nlink++;
 	dentry->d_inode->i_nlink++;
 	return ret;
@@ -437,7 +419,7 @@ static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt)
 		goto out;
 	}
 
-	filp->f_op = &spufs_gang_fops;
+	filp->f_op = &simple_dir_operations;
 	fd_install(ret, filp);
 out:
 	return ret;
@@ -458,8 +440,10 @@ static int spufs_create_gang(struct inode *inode,
 	 * in error path of *_open().
 	 */
 	ret = spufs_gang_open(dget(dentry), mntget(mnt));
-	if (ret < 0)
-		WARN_ON(spufs_rmgang(inode, dentry));
+	if (ret < 0) {
+		int err = simple_rmdir(inode, dentry);
+		WARN_ON(err);
+	}
 
 out:
 	mutex_unlock(&inode->i_mutex);
@@ -600,6 +584,10 @@ spufs_create_root(struct super_block *sb, void *data)
 	struct inode *inode;
 	int ret;
 
+	ret = -ENODEV;
+	if (!spu_management_ops)
+		goto out;
+
 	ret = -ENOMEM;
 	inode = spufs_new_inode(sb, S_IFDIR | 0775);
 	if (!inode)

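The spufs_fill_dir() error path above tears down a partially filled directory by hand, because the directory inode is not yet set up and spufs_prune_dir() would oops. A condensed sketch of that cleanup, assuming the 2007-era dcache layout (d_subdirs / d_u.d_child); prune_partial_dir is an illustrative name.

	#include <linux/dcache.h>
	#include <linux/list.h>

	static void prune_partial_dir(struct dentry *dir)
	{
		struct dentry *dentry, *tmp;

		/* drop every child created so far; dput() removes the child
		 * from the parent's list and frees it (and its inode) when
		 * the last reference goes away */
		list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child)
			dput(dentry);

		shrink_dcache_parent(dir);
	}
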
@@ -93,43 +93,6 @@ void spu_stop_tick(struct spu_context *ctx)
 	}
 }
 
-void spu_sched_tick(struct work_struct *work)
-{
-	struct spu_context *ctx =
-		container_of(work, struct spu_context, sched_work.work);
-	struct spu *spu;
-	int preempted = 0;
-
-	/*
-	 * If this context is being stopped avoid rescheduling from the
-	 * scheduler tick because we would block on the state_mutex.
-	 * The caller will yield the spu later on anyway.
-	 */
-	if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
-		return;
-
-	mutex_lock(&ctx->state_mutex);
-	spu = ctx->spu;
-	if (spu) {
-		int best = sched_find_first_bit(spu_prio->bitmap);
-		if (best <= ctx->prio) {
-			spu_deactivate(ctx);
-			preempted = 1;
-		}
-	}
-	mutex_unlock(&ctx->state_mutex);
-
-	if (preempted) {
-		/*
-		 * We need to break out of the wait loop in spu_run manually
-		 * to ensure this context gets put on the runqueue again
-		 * ASAP.
-		 */
-		wake_up(&ctx->stop_wq);
-	} else
-		spu_start_tick(ctx);
-}
-
 /**
  * spu_add_to_active_list - add spu to active list
  * @spu: spu to add to the active list
@@ -273,34 +236,6 @@ static void spu_prio_wait(struct spu_context *ctx)
 	remove_wait_queue(&ctx->stop_wq, &wait);
 }
 
-/**
- * spu_reschedule - try to find a runnable context for a spu
- * @spu:       spu available
- *
- * This function is called whenever a spu becomes idle.  It looks for the
- * most suitable runnable spu context and schedules it for execution.
- */
-static void spu_reschedule(struct spu *spu)
-{
-	int best;
-
-	spu_free(spu);
-
-	spin_lock(&spu_prio->runq_lock);
-	best = sched_find_first_bit(spu_prio->bitmap);
-	if (best < MAX_PRIO) {
-		struct list_head *rq = &spu_prio->runq[best];
-		struct spu_context *ctx;
-
-		BUG_ON(list_empty(rq));
-
-		ctx = list_entry(rq->next, struct spu_context, rq);
-		__spu_del_from_rq(ctx);
-		wake_up(&ctx->stop_wq);
-	}
-	spin_unlock(&spu_prio->runq_lock);
-}
-
 static struct spu *spu_get_idle(struct spu_context *ctx)
 {
 	struct spu *spu = NULL;
@@ -428,6 +363,51 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
 	return -ERESTARTSYS;
 }
 
+/**
+ * grab_runnable_context - try to find a runnable context
+ *
+ * Remove the highest priority context on the runqueue and return it
+ * to the caller.  Returns %NULL if no runnable context was found.
+ */
+static struct spu_context *grab_runnable_context(int prio)
+{
+	struct spu_context *ctx = NULL;
+	int best;
+
+	spin_lock(&spu_prio->runq_lock);
+	best = sched_find_first_bit(spu_prio->bitmap);
+	if (best < prio) {
+		struct list_head *rq = &spu_prio->runq[best];
+
+		BUG_ON(list_empty(rq));
+
+		ctx = list_entry(rq->next, struct spu_context, rq);
+		__spu_del_from_rq(ctx);
+	}
+	spin_unlock(&spu_prio->runq_lock);
+
+	return ctx;
+}
+
+static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
+{
+	struct spu *spu = ctx->spu;
+	struct spu_context *new = NULL;
+
+	if (spu) {
+		new = grab_runnable_context(max_prio);
+		if (new || force) {
+			spu_unbind_context(spu, ctx);
+			spu_free(spu);
+			if (new)
+				wake_up(&new->stop_wq);
+		}
+
+	}
+
+	return new != NULL;
+}
+
 /**
  * spu_deactivate - unbind a context from it's physical spu
  * @ctx: spu context to unbind
@@ -437,12 +417,7 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
 */
 void spu_deactivate(struct spu_context *ctx)
 {
-	struct spu *spu = ctx->spu;
-
-	if (spu) {
-		spu_unbind_context(spu, ctx);
-		spu_reschedule(spu);
-	}
+	__spu_deactivate(ctx, 1, MAX_PRIO);
 }
 
 /**
@@ -455,21 +430,43 @@ void spu_deactivate(struct spu_context *ctx)
 */
 void spu_yield(struct spu_context *ctx)
 {
-	struct spu *spu;
-
-	if (mutex_trylock(&ctx->state_mutex)) {
-		if ((spu = ctx->spu) != NULL) {
-			int best = sched_find_first_bit(spu_prio->bitmap);
-			if (best < MAX_PRIO) {
-				pr_debug("%s: yielding SPU %d NODE %d\n",
-					 __FUNCTION__, spu->number, spu->node);
-				spu_deactivate(ctx);
-			}
-		}
-		mutex_unlock(&ctx->state_mutex);
+	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
+		mutex_lock(&ctx->state_mutex);
+		__spu_deactivate(ctx, 0, MAX_PRIO);
+		mutex_unlock(&ctx->state_mutex);
 	}
 }
 
+void spu_sched_tick(struct work_struct *work)
+{
+	struct spu_context *ctx =
+		container_of(work, struct spu_context, sched_work.work);
+	int preempted;
+
+	/*
+	 * If this context is being stopped avoid rescheduling from the
+	 * scheduler tick because we would block on the state_mutex.
+	 * The caller will yield the spu later on anyway.
+	 */
+	if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
+		return;
+
+	mutex_lock(&ctx->state_mutex);
+	preempted = __spu_deactivate(ctx, 0, ctx->prio + 1);
+	mutex_unlock(&ctx->state_mutex);
+
+	if (preempted) {
+		/*
+		 * We need to break out of the wait loop in spu_run manually
+		 * to ensure this context gets put on the runqueue again
+		 * ASAP.
+		 */
+		wake_up(&ctx->stop_wq);
+	} else {
+		spu_start_tick(ctx);
+	}
+}
+
 int __init spu_sched_init(void)
 {
 	int i;

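After this rework, all three scheduler entry points funnel into __spu_deactivate() and differ only in the force flag and the priority cutoff passed to grab_runnable_context(). Schematically (arguments copied from the hunks above; a lower numeric value means higher priority, so a cutoff of ctx->prio + 1 lets contexts of equal or higher priority win):

	/* forced unbind; any runnable context may take over the spu:
	 *   spu_deactivate(ctx)  ->  __spu_deactivate(ctx, 1, MAX_PRIO);
	 *
	 * voluntary yield; step aside only if some context is runnable:
	 *   spu_yield(ctx)       ->  __spu_deactivate(ctx, 0, MAX_PRIO);
	 *
	 * scheduler tick; preempt only for equal or higher priority:
	 *   spu_sched_tick(work) ->  __spu_deactivate(ctx, 0, ctx->prio + 1);
	 */
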
@@ -55,7 +55,7 @@ struct spu_context {
 	struct address_space *signal2;	/* 'signal2' area mappings. */
 	struct address_space *mss;	/* 'mss' area mappings. */
 	struct address_space *psmap;	/* 'psmap' area mappings. */
-	spinlock_t mapping_lock;
+	struct mutex mapping_lock;
 	u64 object_id;			/* user space pointer for oprofile */
 
 	enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;

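Throughout this merge, ctx->mapping_lock changes from a spinlock to a mutex (see the spufs.h hunk above): spu_unmap_mappings() now holds the lock across unmap_mapping_range(), which may block, and a spinlock must not be held over a blocking call. A minimal sketch of the resulting pattern; struct example_ctx, example_unmap() and EXAMPLE_LS_SIZE are illustrative names, not from the patch.

	#include <linux/fs.h>
	#include <linux/mm.h>
	#include <linux/mutex.h>

	#define EXAMPLE_LS_SIZE	0x40000	/* assumed mapping size, illustration only */

	struct example_ctx {
		struct address_space *local_store;	/* set/cleared in open/release */
		struct mutex mapping_lock;		/* guards the pointer above */
	};

	/* mirrors spu_unmap_mappings(): hold the (sleeping) mutex across the
	 * possibly blocking unmap so a concurrent release cannot clear the
	 * pointer out from under us */
	static void example_unmap(struct example_ctx *ctx)
	{
		mutex_lock(&ctx->mapping_lock);
		if (ctx->local_store)
			unmap_mapping_range(ctx->local_store, 0, EXAMPLE_LS_SIZE, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
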
@@ -4,5 +4,5 @@ obj-y	+= interrupt.o iommu.o setup.o \
 
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_PPC_UDBG_BEAT)	+= udbg_beat.o
-obj-$(CONFIG_HAS_TXX9_SERIAL)	+= scc_sio.o
+obj-$(CONFIG_SERIAL_TXX9)	+= scc_sio.o
 obj-$(CONFIG_SPU_BASE)		+= spu_priv1.o

@@ -31,8 +31,6 @@
 #define IOBMAP_PAGE_SIZE	(1 << IOBMAP_PAGE_SHIFT)
 #define IOBMAP_PAGE_MASK	(IOBMAP_PAGE_SIZE - 1)
 
-#define IOBMAP_PAGE_FACTOR	(PAGE_SHIFT - IOBMAP_PAGE_SHIFT)
-
 #define IOB_BASE		0xe0000000
 #define IOB_SIZE		0x3000
 /* Configuration registers */
@@ -97,9 +95,6 @@ static void iobmap_build(struct iommu_table *tbl, long index,
 
 	bus_addr = (tbl->it_offset + index) << PAGE_SHIFT;
 
-	npages <<= IOBMAP_PAGE_FACTOR;
-	index <<= IOBMAP_PAGE_FACTOR;
-
 	ip = ((u32 *)tbl->it_base) + index;
 
 	while (npages--) {
@@ -125,9 +120,6 @@ static void iobmap_free(struct iommu_table *tbl, long index,
 
 	bus_addr = (tbl->it_offset + index) << PAGE_SHIFT;
 
-	npages <<= IOBMAP_PAGE_FACTOR;
-	index <<= IOBMAP_PAGE_FACTOR;
-
 	ip = ((u32 *)tbl->it_base) + index;
 
 	while (npages--) {