Merge branch 'master'
commit 9707b27100

@@ -0,0 +1,22 @@
As of the Linux 2.6.10 kernel, it is now possible to change the
IO scheduler for a given block device on the fly (thus making it possible,
for instance, to set the CFQ scheduler for the system default, but
set a specific device to use the anticipatory or noop schedulers - which
can improve that device's throughput).

To set a specific scheduler, simply do this:

	echo SCHEDNAME > /sys/block/DEV/queue/scheduler

where SCHEDNAME is the name of a defined IO scheduler, and DEV is the
device name (hda, hdb, sga, or whatever you happen to have).

The list of defined schedulers can be found by simply doing
a "cat /sys/block/DEV/queue/scheduler" - the list of valid names
will be displayed, with the currently selected scheduler in brackets:

# cat /sys/block/hda/queue/scheduler
noop anticipatory deadline [cfq]
# echo anticipatory > /sys/block/hda/queue/scheduler
# cat /sys/block/hda/queue/scheduler
noop [anticipatory] deadline cfq
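
(An illustrative aside, not part of the patch: the per-device sysfs file shown
above can also be read in a loop, for example to list the active scheduler for
every block device on a system.)

	# list the current IO scheduler for each block device;
	# the name in square brackets is the scheduler in use
	for f in /sys/block/*/queue/scheduler; do
		echo "$f: $(cat $f)"
	done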

@@ -53,4 +53,4 @@ the CPUFreq Mailing list:
* http://lists.linux.org.uk/mailman/listinfo/cpufreq

Clock and voltage scaling for the SA-1100:
* http://www.lart.tudelft.nl/projects/scaling
* http://www.lartmaker.nl/projects/scaling
@@ -25,8 +25,9 @@ Who: Adrian Bunk <bunk@stusta.de>

---------------------------

What: drivers depending on OBSOLETE_OSS_DRIVER
When: January 2006
What: drivers that were depending on OBSOLETE_OSS_DRIVER
      (config options already removed)
When: before 2.6.19
Why:  OSS drivers with ALSA replacements
Who:  Adrian Bunk <bunk@stusta.de>
@@ -3,14 +3,11 @@
--------------------

$Id: driver,v 1.10 2002/07/22 15:27:30 rmk Exp $

This document is meant as a brief overview of some aspects of the new serial
driver. It is not complete, any questions you have should be directed to
<rmk@arm.linux.org.uk>

The reference implementation is contained within serial_amba.c.
The reference implementation is contained within amba_pl011.c.
@@ -31,6 +28,11 @@ The serial core provides a few helper functions. This includes identifing
the correct port structure (via uart_get_console) and decoding command line
arguments (uart_parse_options).

There is also a helper function (uart_write_console) which performs a
character by character write, translating newlines to CRLF sequences.
Driver writers are recommended to use this function rather than implementing
their own version.

Locking
-------
@@ -86,6 +88,7 @@ hardware.
	- TIOCM_DTR	DTR signal.
	- TIOCM_OUT1	OUT1 signal.
	- TIOCM_OUT2	OUT2 signal.
	- TIOCM_LOOP	Set the port into loopback mode.
	If the appropriate bit is set, the signal should be driven
	active. If the bit is clear, the signal should be driven
	inactive.
@@ -141,6 +144,10 @@ hardware.
  enable_ms(port)
	Enable the modem status interrupts.

	This method may be called multiple times. Modem status
	interrupts should be disabled when the shutdown method is
	called.

	Locking: port->lock taken.
	Interrupts: locally disabled.
	This call must not sleep
@@ -160,6 +167,8 @@ hardware.
	state. Enable the port for reception. It should not activate
	RTS nor DTR; this will be done via a separate call to set_mctrl.

	This method will only be called when the port is initially opened.

	Locking: port_sem taken.
	Interrupts: globally disabled.
@@ -169,6 +178,11 @@ hardware.
	RTS nor DTR; this will have already been done via a separate
	call to set_mctrl.

	Drivers must not access port->info once this call has completed.

	This method will only be called when there are no more users of
	this port.

	Locking: port_sem taken.
	Interrupts: caller dependent.
@@ -32,7 +32,16 @@ The output of "cat /proc/meminfo" will have lines like:
.....
HugePages_Total: xxx
HugePages_Free:  yyy
Hugepagesize:    zzz KB
HugePages_Rsvd:  www
Hugepagesize:    zzz kB

where:
HugePages_Total is the size of the pool of hugepages.
HugePages_Free is the number of hugepages in the pool that are not yet
allocated.
HugePages_Rsvd is short for "reserved," and is the number of hugepages
for which a commitment to allocate from the pool has been made, but no
allocation has yet been made. It's vaguely analogous to overcommit.

/proc/filesystems should also show a filesystem of type "hugetlbfs" configured
in the kernel.
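(An illustrative aside, not part of the patch: the counters and the filesystem
type described above can be checked directly from the shell.)

	# show the hugepage pool counters, including the new HugePages_Rsvd line
	grep -i huge /proc/meminfo
	# confirm hugetlbfs support is configured into the kernel
	grep hugetlbfs /proc/filesystems
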
MAINTAINERS
@@ -411,6 +411,7 @@ AX.25 NETWORK LAYER
P:	Ralf Baechle
M:	ralf@linux-mips.org
L:	linux-hams@vger.kernel.org
W:	http://www.linux-ax25.org/
S:	Maintained

BAYCOM/HDLCDRV DRIVERS FOR AX.25
@@ -1463,6 +1464,13 @@ M: support@pathscale.com
L:	openib-general@openib.org
S:	Supported

IPMI SUBSYSTEM
P:	Corey Minyard
M:	minyard@acm.org
L:	openipmi-developer@lists.sourceforge.net
W:	http://openipmi.sourceforge.net/
S:	Supported

IPX NETWORK LAYER
P:	Arnaldo Carvalho de Melo
M:	acme@conectiva.com.br
@@ -1875,6 +1883,7 @@ NETROM NETWORK LAYER
P:	Ralf Baechle
M:	ralf@linux-mips.org
L:	linux-hams@vger.kernel.org
W:	http://www.linux-ax25.org/
S:	Maintained

NETWORK BLOCK DEVICE
@@ -2266,6 +2275,7 @@ ROSE NETWORK LAYER
P:	Ralf Baechle
M:	ralf@linux-mips.org
L:	linux-hams@vger.kernel.org
W:	http://www.linux-ax25.org/
S:	Maintained

RISCOM8 DRIVER
Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 17
EXTRAVERSION =-rc1
EXTRAVERSION =-rc2
NAME=Sliding Snow Leopard

# *DOCUMENTATION*
README
@@ -165,10 +165,31 @@ CONFIGURING the kernel:
"make xconfig"      X windows (Qt) based configuration tool.
"make gconfig"      X windows (Gtk) based configuration tool.
"make oldconfig"    Default all questions based on the contents of
                    your existing ./.config file.
                    your existing ./.config file and asking about
                    new config symbols.
"make silentoldconfig"
                    Like above, but avoids cluttering the screen
                    with questions already answered.
"make defconfig"    Create a ./.config file by using the default
                    symbol values from arch/$ARCH/defconfig.
"make allyesconfig"
                    Create a ./.config file by setting symbol
                    values to 'y' as much as possible.
"make allmodconfig"
                    Create a ./.config file by setting symbol
                    values to 'm' as much as possible.
"make allnoconfig"  Create a ./.config file by setting symbol
                    values to 'n' as much as possible.
"make randconfig"   Create a ./.config file by setting symbol
                    values to random values.

The allyesconfig/allmodconfig/allnoconfig/randconfig variants can
also use the environment variable KCONFIG_ALLCONFIG to specify a
filename that contains config options that the user requires to be
set to a specific value. If KCONFIG_ALLCONFIG=filename is not used,
"make *config" checks for a file named "all{yes/mod/no/random}.config"
for symbol values that are to be forced. If this file is not found,
it checks for a file named "all.config" to contain forced values.
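
(An illustrative aside, not part of the patch, assuming a user-written fragment
named "mini.config" that pins the symbols you care about:)

	# build an otherwise minimal configuration, forcing the symbols
	# listed in mini.config to the values given there
	make KCONFIG_ALLCONFIG=mini.config allnoconfig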

NOTES on "make config":
- having unnecessary drivers will make the kernel bigger, and can
@ -18,6 +18,18 @@
|
|||
#include <asm/io.h>
|
||||
#include <asm/hardware/scoop.h>
|
||||
|
||||
/* PCMCIA to Scoop linkage
|
||||
|
||||
There is no easy way to link multiple scoop devices into one
|
||||
single entity for the pxa2xx_pcmcia device so this structure
|
||||
is used which is setup by the platform code.
|
||||
|
||||
This file is never modular so this symbol is always
|
||||
accessile to the board support files.
|
||||
*/
|
||||
struct scoop_pcmcia_config *platform_scoop_config;
|
||||
EXPORT_SYMBOL(platform_scoop_config);
|
||||
|
||||
#define SCOOP_REG(d,adr) (*(volatile unsigned short*)(d +(adr)))
|
||||
|
||||
struct scoop_dev {
|
||||
|
|
|
@ -194,13 +194,23 @@ void __init at91_add_device_eth(struct at91_eth_data *data) {}
|
|||
#if defined(CONFIG_AT91_CF) || defined(CONFIG_AT91_CF_MODULE)
|
||||
static struct at91_cf_data cf_data;
|
||||
|
||||
static struct resource at91_cf_resources[] = {
|
||||
[0] = {
|
||||
.start = AT91_CF_BASE,
|
||||
/* ties up CS4, CS5, and CS6 */
|
||||
.end = AT91_CF_BASE + (0x30000000 - 1),
|
||||
.flags = IORESOURCE_MEM | IORESOURCE_MEM_8AND16BIT,
|
||||
},
|
||||
};
|
||||
|
||||
static struct platform_device at91rm9200_cf_device = {
|
||||
.name = "at91_cf",
|
||||
.id = -1,
|
||||
.dev = {
|
||||
.platform_data = &cf_data,
|
||||
},
|
||||
.num_resources = 0,
|
||||
.resource = at91_cf_resources,
|
||||
.num_resources = ARRAY_SIZE(at91_cf_resources),
|
||||
};
|
||||
|
||||
void __init at91_add_device_cf(struct at91_cf_data *data)
|
||||
|
|
|
@ -196,12 +196,9 @@ static int __init corgi_ssp_probe(struct platform_device *dev)
|
|||
int ret;
|
||||
|
||||
/* Chip Select - Disable All */
|
||||
GPDR(ssp_machinfo->cs_lcdcon) |= GPIO_bit(ssp_machinfo->cs_lcdcon); /* output */
|
||||
GPSR(ssp_machinfo->cs_lcdcon) = GPIO_bit(ssp_machinfo->cs_lcdcon); /* High - Disable LCD Control/Timing Gen */
|
||||
GPDR(ssp_machinfo->cs_max1111) |= GPIO_bit(ssp_machinfo->cs_max1111); /* output */
|
||||
GPSR(ssp_machinfo->cs_max1111) = GPIO_bit(ssp_machinfo->cs_max1111); /* High - Disable MAX1111*/
|
||||
GPDR(ssp_machinfo->cs_ads7846) |= GPIO_bit(ssp_machinfo->cs_ads7846); /* output */
|
||||
GPSR(ssp_machinfo->cs_ads7846) = GPIO_bit(ssp_machinfo->cs_ads7846); /* High - Disable ADS7846*/
|
||||
pxa_gpio_mode(ssp_machinfo->cs_lcdcon | GPIO_OUT | GPIO_DFLT_HIGH);
|
||||
pxa_gpio_mode(ssp_machinfo->cs_max1111 | GPIO_OUT | GPIO_DFLT_HIGH);
|
||||
pxa_gpio_mode(ssp_machinfo->cs_ads7846 | GPIO_OUT | GPIO_DFLT_HIGH);
|
||||
|
||||
ret = ssp_init(&corgi_ssp_dev, ssp_machinfo->port, 0);
|
||||
|
||||
|
|
|
@ -50,7 +50,7 @@ static struct mtd_partition smdk_default_nand_part[] = {
|
|||
.offset = 0,
|
||||
},
|
||||
[1] = {
|
||||
.name = "S3C2410 flash parition 1",
|
||||
.name = "S3C2410 flash partition 1",
|
||||
.offset = 0,
|
||||
.size = SZ_2M,
|
||||
},
|
||||
|
|
|
@ -139,7 +139,7 @@ static int s3c2440_clk_add(struct sys_device *sysdev)
|
|||
|
||||
clkdivn = __raw_readl(S3C2410_CLKDIVN);
|
||||
clkdivn |= S3C2440_CLKDIVN_UCLK;
|
||||
__raw_writel(camdivn, S3C2410_CLKDIVN);
|
||||
__raw_writel(clkdivn, S3C2410_CLKDIVN);
|
||||
|
||||
mutex_unlock(&clocks_mutex);
|
||||
}
|
||||
|
|
|
@ -81,4 +81,13 @@ config X86_MPPARSE
|
|||
depends on X86_LOCAL_APIC && !X86_VISWS
|
||||
default y
|
||||
|
||||
config DOUBLEFAULT
|
||||
default y
|
||||
bool "Enable doublefault exception handler" if EMBEDDED
|
||||
help
|
||||
This option allows trapping of rare doublefault exceptions that
|
||||
would otherwise cause a system to silently reboot. Disabling this
|
||||
option saves about 4k and might cause you much additional grey
|
||||
hair.
|
||||
|
||||
endmenu
|
||||
|
|
|
@ -168,7 +168,7 @@ int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
|
|||
unsigned long i;
|
||||
int config_size;
|
||||
|
||||
if (!phys_addr || !size || !cpu_has_apic)
|
||||
if (!phys_addr || !size)
|
||||
return -EINVAL;
|
||||
|
||||
mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size);
|
||||
|
@ -1102,6 +1102,9 @@ int __init acpi_boot_table_init(void)
|
|||
dmi_check_system(acpi_dmi_table);
|
||||
#endif
|
||||
|
||||
if (!cpu_has_apic)
|
||||
return -ENODEV;
|
||||
|
||||
/*
|
||||
* If acpi_disabled, bail out
|
||||
* One exception: acpi=ht continues far enough to enumerate LAPICs
|
||||
|
|
|
@ -1079,7 +1079,7 @@ static int apm_console_blank(int blank)
|
|||
break;
|
||||
}
|
||||
|
||||
if (error == APM_NOT_ENGAGED && state != APM_STATE_READY) {
|
||||
if (error == APM_NOT_ENGAGED) {
|
||||
static int tried;
|
||||
int eng_error;
|
||||
if (tried++ == 0) {
|
||||
|
|
|
@ -207,6 +207,8 @@ static void __init init_amd(struct cpuinfo_x86 *c)
|
|||
set_bit(X86_FEATURE_K7, c->x86_capability);
|
||||
break;
|
||||
}
|
||||
if (c->x86 >= 6)
|
||||
set_bit(X86_FEATURE_FXSAVE_LEAK, c->x86_capability);
|
||||
|
||||
display_cacheinfo(c);
|
||||
|
||||
|
|
|
@ -905,14 +905,17 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
|
|||
{
|
||||
cpumask_t oldmask = CPU_MASK_ALL;
|
||||
struct powernow_k8_data *data = powernow_data[pol->cpu];
|
||||
u32 checkfid = data->currfid;
|
||||
u32 checkvid = data->currvid;
|
||||
u32 checkfid;
|
||||
u32 checkvid;
|
||||
unsigned int newstate;
|
||||
int ret = -EIO;
|
||||
|
||||
if (!data)
|
||||
return -EINVAL;
|
||||
|
||||
checkfid = data->currfid;
|
||||
checkvid = data->currvid;
|
||||
|
||||
/* only run on specific CPU from here on */
|
||||
oldmask = current->cpus_allowed;
|
||||
set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
|
||||
|
@ -1106,9 +1109,6 @@ static unsigned int powernowk8_get (unsigned int cpu)
|
|||
|
||||
data = powernow_data[first_cpu(cpu_core_map[cpu])];
|
||||
|
||||
if (!data)
|
||||
return -EINVAL;
|
||||
|
||||
if (!data)
|
||||
return -EINVAL;
|
||||
|
||||
|
|
|
@ -168,7 +168,7 @@ static int cpuid_class_device_create(int i)
|
|||
return err;
|
||||
}
|
||||
|
||||
static int __devinit cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
|
||||
static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
|
||||
{
|
||||
unsigned int cpu = (unsigned long)hcpu;
|
||||
|
||||
|
|
|
@ -43,7 +43,7 @@ DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
|
|||
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
|
||||
|
||||
/* insert a jmp code */
|
||||
static inline void set_jmp_op(void *from, void *to)
|
||||
static __always_inline void set_jmp_op(void *from, void *to)
|
||||
{
|
||||
struct __arch_jmp_op {
|
||||
char op;
|
||||
|
@ -57,7 +57,7 @@ static inline void set_jmp_op(void *from, void *to)
|
|||
/*
|
||||
* returns non-zero if opcodes can be boosted.
|
||||
*/
|
||||
static inline int can_boost(kprobe_opcode_t opcode)
|
||||
static __always_inline int can_boost(kprobe_opcode_t opcode)
|
||||
{
|
||||
switch (opcode & 0xf0 ) {
|
||||
case 0x70:
|
||||
|
@ -88,7 +88,7 @@ static inline int can_boost(kprobe_opcode_t opcode)
|
|||
/*
|
||||
* returns non-zero if opcode modifies the interrupt flag.
|
||||
*/
|
||||
static inline int is_IF_modifier(kprobe_opcode_t opcode)
|
||||
static int __kprobes is_IF_modifier(kprobe_opcode_t opcode)
|
||||
{
|
||||
switch (opcode) {
|
||||
case 0xfa: /* cli */
|
||||
|
@ -138,7 +138,7 @@ void __kprobes arch_remove_kprobe(struct kprobe *p)
|
|||
mutex_unlock(&kprobe_mutex);
|
||||
}
|
||||
|
||||
static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
|
||||
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
|
||||
{
|
||||
kcb->prev_kprobe.kp = kprobe_running();
|
||||
kcb->prev_kprobe.status = kcb->kprobe_status;
|
||||
|
@ -146,7 +146,7 @@ static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
|
|||
kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags;
|
||||
}
|
||||
|
||||
static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
|
||||
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
|
||||
{
|
||||
__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
|
||||
kcb->kprobe_status = kcb->prev_kprobe.status;
|
||||
|
@ -154,7 +154,7 @@ static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
|
|||
kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags;
|
||||
}
|
||||
|
||||
static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
|
||||
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
|
||||
struct kprobe_ctlblk *kcb)
|
||||
{
|
||||
__get_cpu_var(current_kprobe) = p;
|
||||
|
@ -164,7 +164,7 @@ static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
|
|||
kcb->kprobe_saved_eflags &= ~IF_MASK;
|
||||
}
|
||||
|
||||
static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
|
||||
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
|
||||
{
|
||||
regs->eflags |= TF_MASK;
|
||||
regs->eflags &= ~IF_MASK;
|
||||
|
@ -507,7 +507,7 @@ no_change:
|
|||
* Interrupts are disabled on entry as trap1 is an interrupt gate and they
|
||||
* remain disabled thoroughout this function.
|
||||
*/
|
||||
static inline int post_kprobe_handler(struct pt_regs *regs)
|
||||
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
|
||||
{
|
||||
struct kprobe *cur = kprobe_running();
|
||||
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
|
||||
|
@ -543,7 +543,7 @@ out:
|
|||
return 1;
|
||||
}
|
||||
|
||||
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
|
||||
static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
|
||||
{
|
||||
struct kprobe *cur = kprobe_running();
|
||||
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
|
||||
|
|
|
@ -251,7 +251,7 @@ static int msr_class_device_create(int i)
|
|||
return err;
|
||||
}
|
||||
|
||||
static int __devinit msr_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
|
||||
static int msr_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
|
||||
{
|
||||
unsigned int cpu = (unsigned long)hcpu;
|
||||
|
||||
|
|
|
@ -120,7 +120,6 @@ static struct resource qic_res = {
|
|||
* It writes num_bits of the data buffer in msg starting at start_bit.
|
||||
* Note: This function assumes that any unused bit in the data stream
|
||||
* is set to zero so that the ors will work correctly */
|
||||
#define BITS_PER_BYTE 8
|
||||
static void
|
||||
cat_pack(__u8 *msg, const __u16 start_bit, __u8 *data, const __u16 num_bits)
|
||||
{
|
||||
|
|
|
@ -251,7 +251,7 @@ static void __kprobes prepare_break_inst(uint template, uint slot,
|
|||
update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p);
|
||||
}
|
||||
|
||||
static inline void get_kprobe_inst(bundle_t *bundle, uint slot,
|
||||
static void __kprobes get_kprobe_inst(bundle_t *bundle, uint slot,
|
||||
unsigned long *kprobe_inst, uint *major_opcode)
|
||||
{
|
||||
unsigned long kprobe_inst_p0, kprobe_inst_p1;
|
||||
|
@ -278,7 +278,7 @@ static inline void get_kprobe_inst(bundle_t *bundle, uint slot,
|
|||
}
|
||||
|
||||
/* Returns non-zero if the addr is in the Interrupt Vector Table */
|
||||
static inline int in_ivt_functions(unsigned long addr)
|
||||
static int __kprobes in_ivt_functions(unsigned long addr)
|
||||
{
|
||||
return (addr >= (unsigned long)__start_ivt_text
|
||||
&& addr < (unsigned long)__end_ivt_text);
|
||||
|
@ -308,19 +308,19 @@ static int __kprobes valid_kprobe_addr(int template, int slot,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
|
||||
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
|
||||
{
|
||||
kcb->prev_kprobe.kp = kprobe_running();
|
||||
kcb->prev_kprobe.status = kcb->kprobe_status;
|
||||
}
|
||||
|
||||
static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
|
||||
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
|
||||
{
|
||||
__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
|
||||
kcb->kprobe_status = kcb->prev_kprobe.status;
|
||||
}
|
||||
|
||||
static inline void set_current_kprobe(struct kprobe *p,
|
||||
static void __kprobes set_current_kprobe(struct kprobe *p,
|
||||
struct kprobe_ctlblk *kcb)
|
||||
{
|
||||
__get_cpu_var(current_kprobe) = p;
|
||||
|
|
|
@ -20,7 +20,7 @@
|
|||
* Stack layout in 'ret_from_system_call':
|
||||
* ptrace needs to have all regs on the stack.
|
||||
* if the order here is changed, it needs to be
|
||||
* updated in fork.c:copy_process, signal.c:do_signal,
|
||||
* updated in fork.c:copy_thread, signal.c:do_signal,
|
||||
* ptrace.c and ptrace.h
|
||||
*
|
||||
* M32Rx/M32R2 M32R
|
||||
|
@ -41,18 +41,17 @@
|
|||
* @(0x38,sp) - syscall_nr ditto
|
||||
* @(0x3c,sp) - acc0h @(0x3c,sp) - acch
|
||||
* @(0x40,sp) - acc0l @(0x40,sp) - accl
|
||||
* @(0x44,sp) - acc1h @(0x44,sp) - psw
|
||||
* @(0x48,sp) - acc1l @(0x48,sp) - bpc
|
||||
* @(0x4c,sp) - psw @(0x4c,sp) - bbpsw
|
||||
* @(0x50,sp) - bpc @(0x50,sp) - bbpc
|
||||
* @(0x54,sp) - bbpsw @(0x54,sp) - spu (cr3)
|
||||
* @(0x58,sp) - bbpc @(0x58,sp) - fp (r13)
|
||||
* @(0x5c,sp) - spu (cr3) @(0x5c,sp) - lr (r14)
|
||||
* @(0x60,sp) - fp (r13) @(0x60,sp) - spi (cr12)
|
||||
* @(0x64,sp) - lr (r14) @(0x64,sp) - orig_r0
|
||||
* @(0x68,sp) - spi (cr2)
|
||||
* @(0x6c,sp) - orig_r0
|
||||
*
|
||||
* @(0x44,sp) - acc1h @(0x44,sp) - dummy_acc1h
|
||||
* @(0x48,sp) - acc1l @(0x48,sp) - dummy_acc1l
|
||||
* @(0x4c,sp) - psw ditto
|
||||
* @(0x50,sp) - bpc ditto
|
||||
* @(0x54,sp) - bbpsw ditto
|
||||
* @(0x58,sp) - bbpc ditto
|
||||
* @(0x5c,sp) - spu (cr3) ditto
|
||||
* @(0x60,sp) - fp (r13) ditto
|
||||
* @(0x64,sp) - lr (r14) ditto
|
||||
* @(0x68,sp) - spi (cr2) ditto
|
||||
* @(0x6c,sp) - orig_r0 ditto
|
||||
*/
|
||||
|
||||
#include <linux/config.h>
|
||||
|
@ -102,6 +101,12 @@
|
|||
#define ACC0L(reg) @(0x40,reg)
|
||||
#define ACC1H(reg) @(0x44,reg)
|
||||
#define ACC1L(reg) @(0x48,reg)
|
||||
#elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R)
|
||||
#define ACCH(reg) @(0x3C,reg)
|
||||
#define ACCL(reg) @(0x40,reg)
|
||||
#else
|
||||
#error unknown isa configuration
|
||||
#endif
|
||||
#define PSW(reg) @(0x4C,reg)
|
||||
#define BPC(reg) @(0x50,reg)
|
||||
#define BBPSW(reg) @(0x54,reg)
|
||||
|
@ -111,21 +116,6 @@
|
|||
#define LR(reg) @(0x64,reg)
|
||||
#define SP(reg) @(0x68,reg)
|
||||
#define ORIG_R0(reg) @(0x6C,reg)
|
||||
#elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R)
|
||||
#define ACCH(reg) @(0x3C,reg)
|
||||
#define ACCL(reg) @(0x40,reg)
|
||||
#define PSW(reg) @(0x44,reg)
|
||||
#define BPC(reg) @(0x48,reg)
|
||||
#define BBPSW(reg) @(0x4C,reg)
|
||||
#define BBPC(reg) @(0x50,reg)
|
||||
#define SPU(reg) @(0x54,reg)
|
||||
#define FP(reg) @(0x58,reg) /* FP = R13 */
|
||||
#define LR(reg) @(0x5C,reg)
|
||||
#define SP(reg) @(0x60,reg)
|
||||
#define ORIG_R0(reg) @(0x64,reg)
|
||||
#else
|
||||
#error unknown isa configuration
|
||||
#endif
|
||||
|
||||
CF_MASK = 0x00000001
|
||||
TF_MASK = 0x00000100
|
||||
|
@ -142,7 +132,7 @@ VM_MASK = 0x00020000
|
|||
#endif
|
||||
|
||||
ENTRY(ret_from_fork)
|
||||
ld r0, @sp+
|
||||
pop r0
|
||||
bl schedule_tail
|
||||
GET_THREAD_INFO(r8)
|
||||
bra syscall_exit
|
||||
|
@ -231,7 +221,7 @@ restore_all:
|
|||
RESTORE_ALL
|
||||
|
||||
# perform work that needs to be done immediately before resumption
|
||||
# r9 : frags
|
||||
# r9 : flags
|
||||
ALIGN
|
||||
work_pending:
|
||||
and3 r4, r9, #_TIF_NEED_RESCHED
|
||||
|
@ -320,7 +310,7 @@ ENTRY(ei_handler)
|
|||
; GET_ICU_STATUS;
|
||||
seth r0, #shigh(M32R_ICU_ISTS_ADDR)
|
||||
ld r0, @(low(M32R_ICU_ISTS_ADDR),r0)
|
||||
st r0, @-sp
|
||||
push r0
|
||||
#if defined(CONFIG_SMP)
|
||||
/*
|
||||
* If IRQ == 0 --> Nothing to do, Not write IMASK
|
||||
|
@ -557,7 +547,7 @@ check_end:
|
|||
#endif /* CONFIG_PLAT_M32104UT */
|
||||
bl do_IRQ
|
||||
#endif /* CONFIG_SMP */
|
||||
ld r14, @sp+
|
||||
pop r14
|
||||
seth r0, #shigh(M32R_ICU_IMASK_ADDR)
|
||||
st r14, @(low(M32R_ICU_IMASK_ADDR),r0)
|
||||
#else
|
||||
|
@ -1015,4 +1005,3 @@ ENTRY(sys_call_table)
|
|||
.long sys_waitid
|
||||
|
||||
syscall_table_size=(.-sys_call_table)
|
||||
|
||||
|
|
|
@ -116,6 +116,10 @@ void cpu_idle (void)
|
|||
|
||||
void machine_restart(char *__unused)
|
||||
{
|
||||
#if defined(CONFIG_PLAT_MAPPI3)
|
||||
outw(1, (unsigned long)PLD_REBOOT);
|
||||
#endif
|
||||
|
||||
printk("Please push reset button!\n");
|
||||
while (1)
|
||||
cpu_relax();
|
||||
|
|
|
@ -118,6 +118,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
|
|||
#elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R)
|
||||
COPY(acch);
|
||||
COPY(accl);
|
||||
COPY(dummy_acc1h);
|
||||
COPY(dummy_acc1l);
|
||||
#else
|
||||
#error unknown isa configuration
|
||||
#endif
|
||||
|
@ -203,6 +205,8 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
|
|||
#elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R)
|
||||
COPY(acch);
|
||||
COPY(accl);
|
||||
COPY(dummy_acc1h);
|
||||
COPY(dummy_acc1l);
|
||||
#else
|
||||
#error unknown isa configuration
|
||||
#endif
|
||||
|
|
|
@ -816,6 +816,10 @@ config GENERIC_CALIBRATE_DELAY
|
|||
bool
|
||||
default y
|
||||
|
||||
config SCHED_NO_NO_OMIT_FRAME_POINTER
|
||||
bool
|
||||
default y
|
||||
|
||||
#
|
||||
# Select some configuration options automatically based on user selections.
|
||||
#
|
||||
|
@ -1443,6 +1447,10 @@ choice
|
|||
prompt "MIPS MT options"
|
||||
depends on MIPS_MT
|
||||
|
||||
config MIPS_MT_SMTC
|
||||
bool "SMTC: Use all TCs on all VPEs for SMP"
|
||||
select SMP
|
||||
|
||||
config MIPS_MT_SMP
|
||||
bool "Use 1 TC on each available VPE for SMP"
|
||||
select SMP
|
||||
|
@ -1456,6 +1464,11 @@ config MIPS_VPE_LOADER
|
|||
|
||||
endchoice
|
||||
|
||||
config MIPS_MT_FPAFF
|
||||
bool "Dynamic FPU affinity for FP-intensive threads"
|
||||
depends on MIPS_MT
|
||||
default y
|
||||
|
||||
config MIPS_VPE_LOADER_TOM
|
||||
bool "Load VPE program into memory hidden from linux"
|
||||
depends on MIPS_VPE_LOADER
|
||||
|
@ -1472,6 +1485,16 @@ config MIPS_VPE_APSP_API
|
|||
depends on MIPS_VPE_LOADER
|
||||
help
|
||||
|
||||
config MIPS_APSP_KSPD
|
||||
bool "Enable KSPD"
|
||||
depends on MIPS_VPE_APSP_API
|
||||
default y
|
||||
help
|
||||
KSPD is a kernel daemon that accepts syscall requests from the SP
|
||||
side, actions them and returns the results. It also handles the
|
||||
"exit" syscall notifying other kernel modules the SP program is
|
||||
exiting. You probably want to say yes here.
|
||||
|
||||
config SB1_PASS_1_WORKAROUNDS
|
||||
bool
|
||||
depends on CPU_SB1_PASS_1
|
||||
|
@ -1599,7 +1622,7 @@ source "mm/Kconfig"
|
|||
|
||||
config SMP
|
||||
bool "Multi-Processing support"
|
||||
depends on CPU_RM9000 || ((SIBYTE_BCM1x80 || SIBYTE_BCM1x55 || SIBYTE_SB1250 || QEMU) && !SIBYTE_STANDALONE) || SGI_IP27 || MIPS_MT_SMP
|
||||
depends on CPU_RM9000 || ((SIBYTE_BCM1x80 || SIBYTE_BCM1x55 || SIBYTE_SB1250 || QEMU) && !SIBYTE_STANDALONE) || SGI_IP27 || MIPS_MT_SMP || MIPS_MT_SMTC
|
||||
---help---
|
||||
This enables support for systems with more than one CPU. If you have
|
||||
a system with only one CPU, like most personal computers, say N. If
|
||||
|
|
|
@ -105,18 +105,18 @@ cflags-$(CONFIG_CPU_R4300) += -march=r4300 -Wa,--trap
|
|||
cflags-$(CONFIG_CPU_VR41XX) += -march=r4100 -Wa,--trap
|
||||
cflags-$(CONFIG_CPU_R4X00) += -march=r4600 -Wa,--trap
|
||||
cflags-$(CONFIG_CPU_TX49XX) += -march=r4600 -Wa,--trap
|
||||
cflags-$(CONFIG_CPU_MIPS32_R1) += $(call cc-option,-march=mips32,-mips2 -mtune=r4600) \
|
||||
cflags-$(CONFIG_CPU_MIPS32_R1) += $(call cc-option,-march=mips32,-mips32 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
|
||||
-Wa,-mips32 -Wa,--trap
|
||||
cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips2 -mtune=r4600) \
|
||||
cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
|
||||
-Wa,-mips32r2 -Wa,--trap
|
||||
cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips2 -mtune=r4600) \
|
||||
cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
|
||||
-Wa,-mips64 -Wa,--trap
|
||||
cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips2 -mtune=r4600 ) \
|
||||
cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
|
||||
-Wa,-mips64r2 -Wa,--trap
|
||||
cflags-$(CONFIG_CPU_R5000) += -march=r5000 -Wa,--trap
|
||||
cflags-$(CONFIG_CPU_R5432) += $(call cc-options,-march=r5400,-march=r5000) \
|
||||
cflags-$(CONFIG_CPU_R5432) += $(call cc-option,-march=r5400,-march=r5000) \
|
||||
-Wa,--trap
|
||||
cflags-$(CONFIG_CPU_NEVADA) += $(call cc-options,-march=rm5200,-march=r5000) \
|
||||
cflags-$(CONFIG_CPU_NEVADA) += $(call cc-option,-march=rm5200,-march=r5000) \
|
||||
-Wa,--trap
|
||||
cflags-$(CONFIG_CPU_RM7000) += $(call cc-option,-march=rm7000,-march=r5000) \
|
||||
-Wa,--trap
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
# Makefile for the Alchemy Au1000 CPU, generic files.
|
||||
#
|
||||
|
||||
obj-y += prom.o int-handler.o irq.o puts.o time.o reset.o \
|
||||
obj-y += prom.o irq.o puts.o time.o reset.o \
|
||||
au1xxx_irqmap.o clocks.o platform.o power.o setup.o \
|
||||
sleeper.o cputable.o dma.o dbdma.o gpio.o
|
||||
|
||||
|
|
|
@ -1,68 +0,0 @@
|
|||
/*
|
||||
* Copyright 2001 MontaVista Software Inc.
|
||||
* Author: ppopov@mvista.com
|
||||
*
|
||||
* Interrupt dispatcher for Au1000 boards.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*/
|
||||
#include <asm/asm.h>
|
||||
#include <asm/mipsregs.h>
|
||||
#include <asm/addrspace.h>
|
||||
#include <asm/regdef.h>
|
||||
#include <asm/stackframe.h>
|
||||
|
||||
.text
|
||||
.set macro
|
||||
.set noat
|
||||
.align 5
|
||||
|
||||
NESTED(au1000_IRQ, PT_SIZE, sp)
|
||||
SAVE_ALL
|
||||
CLI # Important: mark KERNEL mode !
|
||||
|
||||
mfc0 t0,CP0_CAUSE # get pending interrupts
|
||||
mfc0 t1,CP0_STATUS # get enabled interrupts
|
||||
and t0,t1 # isolate allowed ones
|
||||
|
||||
andi t0,0xff00 # isolate pending bits
|
||||
beqz t0, 3f # spurious interrupt
|
||||
|
||||
andi a0, t0, CAUSEF_IP7
|
||||
beq a0, zero, 1f
|
||||
move a0, sp
|
||||
jal mips_timer_interrupt
|
||||
j ret_from_irq
|
||||
|
||||
1:
|
||||
andi a0, t0, CAUSEF_IP2 # Interrupt Controller 0, Request 0
|
||||
beq a0, zero, 2f
|
||||
move a0,sp
|
||||
jal intc0_req0_irqdispatch
|
||||
j ret_from_irq
|
||||
2:
|
||||
andi a0, t0, CAUSEF_IP3 # Interrupt Controller 0, Request 1
|
||||
beq a0, zero, 3f
|
||||
move a0,sp
|
||||
jal intc0_req1_irqdispatch
|
||||
j ret_from_irq
|
||||
3:
|
||||
andi a0, t0, CAUSEF_IP4 # Interrupt Controller 1, Request 0
|
||||
beq a0, zero, 4f
|
||||
move a0,sp
|
||||
jal intc1_req0_irqdispatch
|
||||
j ret_from_irq
|
||||
4:
|
||||
andi a0, t0, CAUSEF_IP5 # Interrupt Controller 1, Request 1
|
||||
beq a0, zero, 5f
|
||||
move a0, sp
|
||||
jal intc1_req1_irqdispatch
|
||||
j ret_from_irq
|
||||
|
||||
5:
|
||||
move a0, sp
|
||||
j spurious_interrupt
|
||||
END(au1000_IRQ)
|
|
@ -66,7 +66,6 @@
|
|||
#define EXT_INTC1_REQ1 5 /* IP 5 */
|
||||
#define MIPS_TIMER_IP 7 /* IP 7 */
|
||||
|
||||
extern asmlinkage void au1000_IRQ(void);
|
||||
extern void set_debug_traps(void);
|
||||
extern irq_cpustat_t irq_stat [NR_CPUS];
|
||||
|
||||
|
@ -446,7 +445,6 @@ void __init arch_init_irq(void)
|
|||
extern int au1xxx_ic0_nr_irqs;
|
||||
|
||||
cp0_status = read_c0_status();
|
||||
set_except_vector(0, au1000_IRQ);
|
||||
|
||||
/* Initialize interrupt controllers to a safe state.
|
||||
*/
|
||||
|
@ -661,3 +659,21 @@ restore_au1xxx_intctl(void)
|
|||
au_writel(sleep_intctl_mask[0], IC0_MASKSET); au_sync();
|
||||
}
|
||||
#endif /* CONFIG_PM */
|
||||
|
||||
asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
|
||||
{
|
||||
unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
|
||||
|
||||
if (pending & CAUSEF_IP7)
|
||||
mips_timer_interrupt(regs);
|
||||
else if (pending & CAUSEF_IP2)
|
||||
intc0_req0_irqdispatch(regs);
|
||||
else if (pending & CAUSEF_IP3)
|
||||
intc0_req1_irqdispatch(regs);
|
||||
else if (pending & CAUSEF_IP4)
|
||||
intc1_req0_irqdispatch(regs);
|
||||
else if (pending & CAUSEF_IP5)
|
||||
intc1_req1_irqdispatch(regs);
|
||||
else
|
||||
spurious_interrupt(regs);
|
||||
}
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# Makefile for the Cobalt micro systems family specific parts of the kernel
|
||||
#
|
||||
|
||||
obj-y := irq.o int-handler.o reset.o setup.o
|
||||
obj-y := irq.o reset.o setup.o
|
||||
|
||||
obj-$(CONFIG_EARLY_PRINTK) += console.o
|
||||
|
||||
|
|
|
@ -1,25 +0,0 @@
|
|||
/*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (C) 1995, 1996, 1997, 2003 by Ralf Baechle
|
||||
* Copyright (C) 2001, 2002, 2003 by Liam Davies (ldavies@agile.tv)
|
||||
*/
|
||||
#include <asm/asm.h>
|
||||
#include <asm/mipsregs.h>
|
||||
#include <asm/mach-cobalt/cobalt.h>
|
||||
#include <asm/regdef.h>
|
||||
#include <asm/stackframe.h>
|
||||
|
||||
.text
|
||||
.align 5
|
||||
NESTED(cobalt_handle_int, PT_SIZE, sp)
|
||||
SAVE_ALL
|
||||
CLI
|
||||
|
||||
PTR_LA ra, ret_from_irq
|
||||
move a0, sp
|
||||
j cobalt_irq
|
||||
|
||||
END(cobalt_handle_int)
|
|
@ -20,8 +20,6 @@
|
|||
|
||||
#include <asm/mach-cobalt/cobalt.h>
|
||||
|
||||
extern void cobalt_handle_int(void);
|
||||
|
||||
/*
|
||||
* We have two types of interrupts that we handle, ones that come in through
|
||||
* the CPU interrupt lines, and ones that come in on the via chip. The CPU
|
||||
|
@ -79,7 +77,7 @@ static inline void via_pic_irq(struct pt_regs *regs)
|
|||
do_IRQ(irq, regs);
|
||||
}
|
||||
|
||||
asmlinkage void cobalt_irq(struct pt_regs *regs)
|
||||
asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
|
||||
{
|
||||
unsigned pending;
|
||||
|
||||
|
@ -122,8 +120,6 @@ void __init arch_init_irq(void)
|
|||
*/
|
||||
GALILEO_OUTL(0, GT_INTRMASK_OFS);
|
||||
|
||||
set_except_vector(0, cobalt_handle_int);
|
||||
|
||||
init_i8259_irqs(); /* 0 ... 15 */
|
||||
mips_cpu_irq_init(COBALT_CPU_IRQ); /* 16 ... 23 */
|
||||
|
||||
|
|
File diff suppressed because it is too large
|
@ -3,6 +3,6 @@
|
|||
# under Linux.
|
||||
#
|
||||
|
||||
obj-y += setup.o irq.o int-handler.o nile4_pic.o
|
||||
obj-y += setup.o irq.o nile4_pic.o
|
||||
|
||||
EXTRA_AFLAGS := $(CFLAGS)
|
||||
|
|
|
@ -1,120 +0,0 @@
|
|||
/*
|
||||
* arch/mips/ddb5074/int-handler.S -- NEC DDB Vrc-5074 interrupt handler
|
||||
*
|
||||
* Based on arch/mips/sgi/kernel/indyIRQ.S
|
||||
*
|
||||
* Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
|
||||
*
|
||||
* Copyright (C) 2000 Geert Uytterhoeven <geert@sonycom.com>
|
||||
* Sony Software Development Center Europe (SDCE), Brussels
|
||||
*/
|
||||
#include <asm/asm.h>
|
||||
#include <asm/mipsregs.h>
|
||||
#include <asm/regdef.h>
|
||||
#include <asm/stackframe.h>
|
||||
|
||||
/* A lot of complication here is taken away because:
|
||||
*
|
||||
* 1) We handle one interrupt and return, sitting in a loop and moving across
|
||||
* all the pending IRQ bits in the cause register is _NOT_ the answer, the
|
||||
* common case is one pending IRQ so optimize in that direction.
|
||||
*
|
||||
* 2) We need not check against bits in the status register IRQ mask, that
|
||||
* would make this routine slow as hell.
|
||||
*
|
||||
* 3) Linux only thinks in terms of all IRQs on or all IRQs off, nothing in
|
||||
* between like BSD spl() brain-damage.
|
||||
*
|
||||
* Furthermore, the IRQs on the INDY look basically (barring software IRQs
|
||||
* which we don't use at all) like:
|
||||
*
|
||||
* MIPS IRQ Source
|
||||
* -------- ------
|
||||
* 0 Software (ignored)
|
||||
* 1 Software (ignored)
|
||||
* 2 Local IRQ level zero
|
||||
* 3 Local IRQ level one
|
||||
* 4 8254 Timer zero
|
||||
* 5 8254 Timer one
|
||||
* 6 Bus Error
|
||||
* 7 R4k timer (what we use)
|
||||
*
|
||||
* We handle the IRQ according to _our_ priority which is:
|
||||
*
|
||||
* Highest ---- R4k Timer
|
||||
* Local IRQ zero
|
||||
* Local IRQ one
|
||||
* Bus Error
|
||||
* 8254 Timer zero
|
||||
* Lowest ---- 8254 Timer one
|
||||
*
|
||||
* then we just return, if multiple IRQs are pending then we will just take
|
||||
* another exception, big deal.
|
||||
*/
|
||||
|
||||
.text
|
||||
.set noreorder
|
||||
.set noat
|
||||
.align 5
|
||||
NESTED(ddbIRQ, PT_SIZE, sp)
|
||||
SAVE_ALL
|
||||
CLI
|
||||
.set at
|
||||
mfc0 s0, CP0_CAUSE # get irq mask
|
||||
|
||||
#if 1
|
||||
mfc0 t2,CP0_STATUS # get enabled interrupts
|
||||
and s0,t2 # isolate allowed ones
|
||||
#endif
|
||||
/* First we check for r4k counter/timer IRQ. */
|
||||
andi a0, s0, CAUSEF_IP2 # delay slot, check local level zero
|
||||
beq a0, zero, 1f
|
||||
andi a0, s0, CAUSEF_IP3 # delay slot, check local level one
|
||||
|
||||
/* Wheee, local level zero interrupt. */
|
||||
jal ddb_local0_irqdispatch
|
||||
move a0, sp # delay slot
|
||||
|
||||
j ret_from_irq
|
||||
nop # delay slot
|
||||
|
||||
1:
|
||||
beq a0, zero, 1f
|
||||
andi a0, s0, CAUSEF_IP6 # delay slot, check bus error
|
||||
|
||||
/* Wheee, local level one interrupt. */
|
||||
move a0, sp
|
||||
jal ddb_local1_irqdispatch
|
||||
nop
|
||||
|
||||
j ret_from_irq
|
||||
nop
|
||||
|
||||
1:
|
||||
beq a0, zero, 1f
|
||||
nop
|
||||
|
||||
/* Wheee, an asynchronous bus error... */
|
||||
move a0, sp
|
||||
jal ddb_buserror_irq
|
||||
nop
|
||||
|
||||
j ret_from_irq
|
||||
nop
|
||||
|
||||
1:
|
||||
/* Here by mistake? This is possible, what can happen
|
||||
* is that by the time we take the exception the IRQ
|
||||
* pin goes low, so just leave if this is the case.
|
||||
*/
|
||||
andi a0, s0, (CAUSEF_IP4 | CAUSEF_IP5)
|
||||
beq a0, zero, 1f
|
||||
|
||||
/* Must be one of the 8254 timers... */
|
||||
move a0, sp
|
||||
jal ddb_8254timer_irq
|
||||
nop
|
||||
1:
|
||||
j ret_from_irq
|
||||
nop
|
||||
END(ddbIRQ)
|
|
@ -21,8 +21,6 @@
|
|||
#include <asm/ddb5xxx/ddb5074.h>
|
||||
|
||||
|
||||
extern asmlinkage void ddbIRQ(void);
|
||||
|
||||
static struct irqaction irq_cascade = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL };
|
||||
|
||||
#define M1543_PNP_CONFIG 0x03f0 /* PnP Config Port */
|
||||
|
@ -90,7 +88,7 @@ static void m1543_irq_setup(void)
|
|||
|
||||
}
|
||||
|
||||
void ddb_local0_irqdispatch(struct pt_regs *regs)
|
||||
static void ddb_local0_irqdispatch(struct pt_regs *regs)
|
||||
{
|
||||
u32 mask;
|
||||
int nile4_irq;
|
||||
|
@ -118,29 +116,41 @@ void ddb_local0_irqdispatch(struct pt_regs *regs)
|
|||
}
|
||||
}
|
||||
|
||||
void ddb_local1_irqdispatch(void)
|
||||
static void ddb_local1_irqdispatch(void)
|
||||
{
|
||||
printk("ddb_local1_irqdispatch called\n");
|
||||
}
|
||||
|
||||
void ddb_buserror_irq(void)
|
||||
static void ddb_buserror_irq(void)
|
||||
{
|
||||
printk("ddb_buserror_irq called\n");
|
||||
}
|
||||
|
||||
void ddb_8254timer_irq(void)
|
||||
static void ddb_8254timer_irq(void)
|
||||
{
|
||||
printk("ddb_8254timer_irq called\n");
|
||||
}
|
||||
|
||||
asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
|
||||
{
|
||||
unsigned int pending = read_c0_cause() & read_c0_status();
|
||||
|
||||
if (pending & CAUSEF_IP2)
|
||||
ddb_local0_irqdispatch(regs);
|
||||
else if (pending & CAUSEF_IP3)
|
||||
ddb_local1_irqdispatch();
|
||||
else if (pending & CAUSEF_IP6)
|
||||
ddb_buserror_irq();
|
||||
else if (pending & (CAUSEF_IP4 | CAUSEF_IP5))
|
||||
ddb_8254timer_irq();
|
||||
}
|
||||
|
||||
void __init arch_init_irq(void)
|
||||
{
|
||||
/* setup cascade interrupts */
|
||||
setup_irq(NILE4_IRQ_BASE + NILE4_INT_INTE, &irq_cascade);
|
||||
setup_irq(CPU_IRQ_BASE + CPU_NILE4_CASCADE, &irq_cascade);
|
||||
|
||||
set_except_vector(0, ddbIRQ);
|
||||
|
||||
nile4_irq_setup(NILE4_IRQ_BASE);
|
||||
m1543_irq_setup();
|
||||
init_i8259_irqs();
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
# under Linux.
|
||||
#
|
||||
|
||||
obj-y += setup.o irq.o int-handler.o nile4_pic.o vrc5476_irq.o
|
||||
obj-y += setup.o irq.o nile4_pic.o vrc5476_irq.o
|
||||
obj-$(CONFIG_KGDB) += dbg_io.o
|
||||
|
||||
EXTRA_AFLAGS := $(CFLAGS)
|
||||
|
|
|
@ -1,112 +0,0 @@
|
|||
/*
|
||||
* Copyright 2001 MontaVista Software Inc.
|
||||
* Author: jsun@mvista.com or jsun@junsun.net
|
||||
*
|
||||
* First-level interrupt dispatcher for ddb5476
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*/
|
||||
#include <asm/asm.h>
|
||||
#include <asm/mipsregs.h>
|
||||
#include <asm/addrspace.h>
|
||||
#include <asm/regdef.h>
|
||||
#include <asm/stackframe.h>
|
||||
|
||||
#include <asm/ddb5xxx/ddb5476.h>
|
||||
|
||||
/*
|
||||
* first level interrupt dispatcher for ocelot board -
|
||||
* We check for the timer first, then check PCI ints A and D.
|
||||
* Then check for serial IRQ and fall through.
|
||||
*/
|
||||
.align 5
|
||||
NESTED(ddb5476_handle_int, PT_SIZE, sp)
|
||||
SAVE_ALL
|
||||
CLI
|
||||
.set at
|
||||
.set noreorder
|
||||
mfc0 t0, CP0_CAUSE
|
||||
mfc0 t2, CP0_STATUS
|
||||
|
||||
and t0, t2
|
||||
|
||||
andi t1, t0, STATUSF_IP7 /* cpu timer */
|
||||
bnez t1, ll_cpu_ip7
|
||||
andi t1, t0, STATUSF_IP2 /* vrc5476 & i8259 */
|
||||
bnez t1, ll_cpu_ip2
|
||||
andi t1, t0, STATUSF_IP3
|
||||
bnez t1, ll_cpu_ip3
|
||||
andi t1, t0, STATUSF_IP4
|
||||
bnez t1, ll_cpu_ip4
|
||||
andi t1, t0, STATUSF_IP5
|
||||
bnez t1, ll_cpu_ip5
|
||||
andi t1, t0, STATUSF_IP6
|
||||
bnez t1, ll_cpu_ip6
|
||||
andi t1, t0, STATUSF_IP0 /* software int 0 */
|
||||
bnez t1, ll_cpu_ip0
|
||||
andi t1, t0, STATUSF_IP1 /* software int 1 */
|
||||
bnez t1, ll_cpu_ip1
|
||||
nop
|
||||
|
||||
.set reorder
|
||||
|
||||
/* wrong alarm or masked ... */
|
||||
// j spurious_interrupt
|
||||
move a0, sp
|
||||
jal vrc5476_irq_dispatch
|
||||
j ret_from_irq
|
||||
nop
|
||||
|
||||
.align 5
|
||||
|
||||
ll_cpu_ip0:
|
||||
li a0, CPU_IRQ_BASE + 0
|
||||
move a1, sp
|
||||
jal do_IRQ
|
||||
j ret_from_irq
|
||||
|
||||
ll_cpu_ip1:
|
||||
li a0, CPU_IRQ_BASE + 1
|
||||
move a1, sp
|
||||
jal do_IRQ
|
||||
j ret_from_irq
|
||||
|
||||
ll_cpu_ip2: /* jump to second-level dispatching */
|
||||
move a0, sp
|
||||
jal vrc5476_irq_dispatch
|
||||
j ret_from_irq
|
||||
|
||||
ll_cpu_ip3:
|
||||
li a0, CPU_IRQ_BASE + 3
|
||||
move a1, sp
|
||||
jal do_IRQ
|
||||
j ret_from_irq
|
||||
|
||||
ll_cpu_ip4:
|
||||
li a0, CPU_IRQ_BASE + 4
|
||||
move a1, sp
|
||||
jal do_IRQ
|
||||
j ret_from_irq
|
||||
|
||||
ll_cpu_ip5:
|
||||
li a0, CPU_IRQ_BASE + 5
|
||||
move a1, sp
|
||||
jal do_IRQ
|
||||
j ret_from_irq
|
||||
|
||||
ll_cpu_ip6:
|
||||
li a0, CPU_IRQ_BASE + 6
|
||||
move a1, sp
|
||||
jal do_IRQ
|
||||
j ret_from_irq
|
||||
|
||||
ll_cpu_ip7:
|
||||
li a0, CPU_IRQ_BASE + 7
|
||||
move a1, sp
|
||||
jal do_IRQ
|
||||
j ret_from_irq
|
||||
|
||||
END(ddb5476_handle_int)
|
|
@ -110,11 +110,36 @@ static void nile4_irq_setup(void)
|
|||
static struct irqaction irq_cascade = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL };
|
||||
static struct irqaction irq_error = { no_action, 0, CPU_MASK_NONE, "error", NULL, NULL };
|
||||
|
||||
extern asmlinkage void ddb5476_handle_int(void);
|
||||
extern int setup_irq(unsigned int irq, struct irqaction *irqaction);
|
||||
extern void mips_cpu_irq_init(u32 irq_base);
|
||||
extern void vrc5476_irq_init(u32 irq_base);
|
||||
|
||||
extern void vrc5476_irq_dispatch(struct pt_regs *regs);
|
||||
|
||||
asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
|
||||
{
|
||||
unsigned int pending = read_c0_cause() & read_c0_status();
|
||||
|
||||
if (pending & STATUSF_IP7)
|
||||
do_IRQ(CPU_IRQ_BASE + 7, regs);
|
||||
else if (pending & STATUSF_IP2)
|
||||
vrc5476_irq_dispatch(regs);
|
||||
else if (pending & STATUSF_IP3)
|
||||
do_IRQ(CPU_IRQ_BASE + 3, regs);
|
||||
else if (pending & STATUSF_IP4)
|
||||
do_IRQ(CPU_IRQ_BASE + 4, regs);
|
||||
else if (pending & STATUSF_IP5)
|
||||
do_IRQ(CPU_IRQ_BASE + 5, regs);
|
||||
else if (pending & STATUSF_IP6)
|
||||
do_IRQ(CPU_IRQ_BASE + 6, regs);
|
||||
else if (pending & STATUSF_IP0)
|
||||
do_IRQ(CPU_IRQ_BASE, regs);
|
||||
else if (pending & STATUSF_IP1)
|
||||
do_IRQ(CPU_IRQ_BASE + 1, regs);
|
||||
|
||||
vrc5476_irq_dispatch(regs);
|
||||
}
|
||||
|
||||
void __init arch_init_irq(void)
|
||||
{
|
||||
/* hardware initialization */
|
||||
|
@ -137,7 +162,4 @@ void __init arch_init_irq(void)
|
|||
setup_irq(VRC5476_IRQ_BASE + VRC5476_IRQ_LBRT, &irq_error);
|
||||
setup_irq(VRC5476_IRQ_BASE + VRC5476_IRQ_PCIS, &irq_error);
|
||||
setup_irq(VRC5476_IRQ_BASE + VRC5476_IRQ_PCI, &irq_error);
|
||||
|
||||
/* setup the grandpa intr vector */
|
||||
set_except_vector(0, ddb5476_handle_int);
|
||||
}
|
||||
|
|
|
@ -77,11 +77,9 @@ vrc5476_irq_init(u32 base)
|
|||
}
|
||||
|
||||
|
||||
asmlinkage void
|
||||
void
|
||||
vrc5476_irq_dispatch(struct pt_regs *regs)
|
||||
{
|
||||
extern void spurious_interrupt(void);
|
||||
|
||||
u32 mask;
|
||||
int nile4_irq;
|
||||
|
||||
|
@ -107,5 +105,5 @@ vrc5476_irq_dispatch(struct pt_regs *regs)
|
|||
return;
|
||||
}
|
||||
}
|
||||
spurious_interrupt();
|
||||
spurious_interrupt(regs);
|
||||
}
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# Makefile for NEC DDB-Vrc5477 board
|
||||
#
|
||||
|
||||
obj-y += int-handler.o irq.o irq_5477.o setup.o lcd44780.o
|
||||
obj-y += irq.o irq_5477.o setup.o lcd44780.o
|
||||
|
||||
obj-$(CONFIG_RUNTIME_DEBUG) += debug.o
|
||||
obj-$(CONFIG_KGDB) += kgdb_io.o
|
||||
|
|
|
@ -1,75 +0,0 @@
|
|||
/*
|
||||
* Copyright 2001 MontaVista Software Inc.
|
||||
* Author: jsun@mvista.com or jsun@junsun.net
|
||||
*
|
||||
* First-level interrupt dispatcher for ddb5477
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*/
|
||||
#include <asm/asm.h>
|
||||
#include <asm/mipsregs.h>
|
||||
#include <asm/addrspace.h>
|
||||
#include <asm/regdef.h>
|
||||
#include <asm/stackframe.h>
|
||||
#include <asm/ddb5xxx/ddb5477.h>
|
||||
|
||||
/*
|
||||
* first level interrupt dispatcher for ocelot board -
|
||||
* We check for the timer first, then check PCI ints A and D.
|
||||
* Then check for serial IRQ and fall through.
|
||||
*/
|
||||
.align 5
|
||||
NESTED(ddb5477_handle_int, PT_SIZE, sp)
|
||||
SAVE_ALL
|
||||
CLI
|
||||
.set at
|
||||
.set noreorder
|
||||
mfc0 t0, CP0_CAUSE
|
||||
mfc0 t2, CP0_STATUS
|
||||
|
||||
and t0, t2
|
||||
|
||||
andi t1, t0, STATUSF_IP7 /* cpu timer */
|
||||
bnez t1, ll_cputimer_irq
|
||||
andi t1, t0, (STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP5 | STATUSF_IP6 )
|
||||
bnez t1, ll_vrc5477_irq
|
||||
andi t1, t0, STATUSF_IP0 /* software int 0 */
|
||||
bnez t1, ll_cpu_ip0
|
||||
andi t1, t0, STATUSF_IP1 /* software int 1 */
|
||||
bnez t1, ll_cpu_ip1
|
||||
nop
|
||||
.set reorder
|
||||
|
||||
/* wrong alarm or masked ... */
|
||||
j spurious_interrupt
|
||||
nop
|
||||
END(ddb5477_handle_int)
|
||||
|
||||
.align 5
|
||||
|
||||
ll_vrc5477_irq:
|
||||
move a0, sp
|
||||
jal vrc5477_irq_dispatch
|
||||
j ret_from_irq
|
||||
|
||||
ll_cputimer_irq:
|
||||
li a0, CPU_IRQ_BASE + 7
|
||||
move a1, sp
|
||||
jal do_IRQ
|
||||
j ret_from_irq
|
||||
|
||||
|
||||
ll_cpu_ip0:
|
||||
li a0, CPU_IRQ_BASE + 0
|
||||
move a1, sp
|
||||
jal do_IRQ
|
||||
j ret_from_irq
|
||||
|
||||
ll_cpu_ip1:
|
||||
li a0, CPU_IRQ_BASE + 1
|
||||
move a1, sp
|
||||
jal do_IRQ
|
||||
j ret_from_irq
|
|
@ -75,7 +75,6 @@ set_pci_int_attr(u32 pci, u32 intn, u32 active, u32 trigger)
|
|||
|
||||
extern void vrc5477_irq_init(u32 base);
|
||||
extern void mips_cpu_irq_init(u32 base);
|
||||
extern asmlinkage void ddb5477_handle_int(void);
|
||||
extern int setup_irq(unsigned int irq, struct irqaction *irqaction);
|
||||
static struct irqaction irq_cascade = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL };
|
||||
|
||||
|
@ -135,9 +134,6 @@ void __init arch_init_irq(void)
|
|||
/* setup cascade interrupts */
|
||||
setup_irq(VRC5477_IRQ_BASE + VRC5477_I8259_CASCADE, &irq_cascade);
|
||||
setup_irq(CPU_IRQ_BASE + CPU_VRC5477_CASCADE, &irq_cascade);
|
||||
|
||||
/* hook up the first-level interrupt handler */
|
||||
set_except_vector(0, ddb5477_handle_int);
|
||||
}
|
||||
|
||||
u8 i8259_interrupt_ack(void)
|
||||
|
@ -159,7 +155,7 @@ u8 i8259_interrupt_ack(void)
|
|||
* the first level int-handler will jump here if it is a vrc5477 irq
|
||||
*/
|
||||
#define NUM_5477_IRQS 32
|
||||
asmlinkage void
|
||||
static void
|
||||
vrc5477_irq_dispatch(struct pt_regs *regs)
|
||||
{
|
||||
u32 intStatus;
|
||||
|
@ -197,3 +193,21 @@ vrc5477_irq_dispatch(struct pt_regs *regs)
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
#define VR5477INTS (STATUSF_IP2|STATUSF_IP3|STATUSF_IP4|STATUSF_IP5|STATUSF_IP6)
|
||||
|
||||
asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
|
||||
{
|
||||
unsigned int pending = read_c0_cause() & read_c0_status();
|
||||
|
||||
if (pending & STATUSF_IP7)
|
||||
do_IRQ(CPU_IRQ_BASE + 7, regs);
|
||||
else if (pending & VR5477INTS)
|
||||
vrc5477_irq_dispatch(regs);
|
||||
else if (pending & STATUSF_IP0)
|
||||
do_IRQ(CPU_IRQ_BASE, regs);
|
||||
else if (pending & STATUSF_IP1)
|
||||
do_IRQ(CPU_IRQ_BASE + 1, regs);
|
||||
else
|
||||
spurious_interrupt(regs);
|
||||
}
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
/*
|
||||
* arch/mips/dec/decstation.c
|
||||
*/
|
||||
#include <asm/sections.h>
|
||||
|
||||
#define RELOC
|
||||
#define INITRD
|
||||
|
@ -24,7 +25,7 @@
|
|||
#define INITRD_START (*(unsigned long *) (PARAM+0x218))
|
||||
#define INITRD_SIZE (*(unsigned long *) (PARAM+0x21c))
|
||||
|
||||
extern int _ftext, _end; /* begin and end of kernel image */
|
||||
extern int _ftext; /* begin and end of kernel image */
|
||||
extern void kernel_entry(int, char **, unsigned long, int *);
|
||||
|
||||
void * memcpy(void * dest, const void *src, unsigned int count)
|
||||
|
|
|
@ -36,7 +36,7 @@
|
|||
.text
|
||||
.set noreorder
|
||||
/*
|
||||
* decstation_handle_int: Interrupt handler for DECstations
|
||||
* plat_irq_dispatch: Interrupt handler for DECstations
|
||||
*
|
||||
* We follow the model in the Indy interrupt code by David Miller, where he
|
||||
* says: a lot of complication here is taken away because:
|
||||
|
@ -125,11 +125,7 @@
|
|||
* just take another exception, big deal.
|
||||
*/
|
||||
.align 5
|
||||
NESTED(decstation_handle_int, PT_SIZE, ra)
|
||||
.set noat
|
||||
SAVE_ALL
|
||||
CLI # TEST: interrupts should be off
|
||||
.set at
|
||||
NESTED(plat_irq_dispatch, PT_SIZE, ra)
|
||||
.set noreorder
|
||||
|
||||
/*
|
||||
|
@ -282,9 +278,11 @@ fpu:
|
|||
#endif
|
||||
|
||||
spurious:
|
||||
j spurious_interrupt
|
||||
jal spurious_interrupt
|
||||
nop
|
||||
END(decstation_handle_int)
|
||||
j ret_from_irq
|
||||
nop
|
||||
END(plat_irq_dispatch)
|
||||
|
||||
/*
|
||||
* Generic unimplemented interrupt routines -- cpu_mask_nr_tbl
|
||||
|
|
|
@ -48,8 +48,6 @@ extern void dec_machine_halt(void);
|
|||
extern void dec_machine_power_off(void);
|
||||
extern irqreturn_t dec_intr_halt(int irq, void *dev_id, struct pt_regs *regs);
|
||||
|
||||
extern asmlinkage void decstation_handle_int(void);
|
||||
|
||||
unsigned long dec_kn_slot_base, dec_kn_slot_size;
|
||||
|
||||
EXPORT_SYMBOL(dec_kn_slot_base);
|
||||
|
@ -744,7 +742,6 @@ void __init arch_init_irq(void)
|
|||
panic("Don't know how to set this up!");
|
||||
break;
|
||||
}
|
||||
set_except_vector(0, decstation_handle_int);
|
||||
|
||||
/* Free the FPU interrupt if the exception is present. */
|
||||
if (!cpu_has_nofpuex) {
|
||||
|
|
|
@ -6,4 +6,4 @@
|
|||
# Makefile for the Galileo EV96100 board.
|
||||
#
|
||||
|
||||
obj-y += init.o irq.o puts.o reset.o time.o int-handler.o setup.o
|
||||
obj-y += init.o irq.o puts.o reset.o time.o setup.o
|
||||
|
|
|
@ -1,33 +0,0 @@
|
|||
#include <asm/asm.h>
|
||||
#include <asm/mipsregs.h>
|
||||
#include <asm/regdef.h>
|
||||
#include <asm/stackframe.h>
|
||||
|
||||
.set noat
|
||||
.align 5
|
||||
|
||||
NESTED(ev96100IRQ, PT_SIZE, sp)
|
||||
SAVE_ALL
|
||||
CLI # Important: mark KERNEL mode !
|
||||
|
||||
mfc0 t0, CP0_CAUSE # get pending interrupts
|
||||
mfc0 t1, CP0_STATUS # get enabled interrupts
|
||||
and t0, t1 # isolate allowed ones
|
||||
|
||||
# FIX ME add R7000 extensions
|
||||
andi t0,0xff00 # isolate pending bits
|
||||
andi a0, t0, CAUSEF_IP7
|
||||
beq a0, zero, 1f
|
||||
move a0, sp
|
||||
jal mips_timer_interrupt
|
||||
j ret_from_irq
|
||||
|
||||
1: beqz t0, 3f # spurious interrupt
|
||||
|
||||
move a0, t0
|
||||
move a1, sp
|
||||
jal ev96100_cpu_irq
|
||||
j ret_from_irq
|
||||
|
||||
3: j spurious_interrupt
|
||||
END(ev96100IRQ)
|
|
@ -40,8 +40,6 @@
|
|||
#include <linux/interrupt.h>
|
||||
#include <asm/irq_cpu.h>
|
||||
|
||||
extern asmlinkage void ev96100IRQ(void);
|
||||
|
||||
static inline unsigned int ffz8(unsigned int word)
|
||||
{
|
||||
unsigned long k;
|
||||
|
@ -54,13 +52,26 @@ static inline unsigned int ffz8(unsigned int word)
|
|||
return k;
|
||||
}
|
||||
|
||||
asmlinkage void ev96100_cpu_irq(unsigned int pendin)
|
||||
extern void mips_timer_interrupt(struct pt_regs *regs);
|
||||
|
||||
asmlinkage void ev96100_cpu_irq(unsigned int pending, struct pt_regs *regs)
|
||||
{
|
||||
do_IRQ(ffz8(pending >> 8), regs);
|
||||
}
|
||||
|
||||
asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
|
||||
{
|
||||
unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
|
||||
|
||||
if (pending & CAUSEF_IP7)
|
||||
mips_timer_interrupt(regs);
|
||||
else if (pending)
|
||||
ev96100_cpu_irq(pending, regs);
|
||||
else
|
||||
spurious_interrupt(regs);
|
||||
}
|
||||
|
||||
void __init arch_init_irq(void)
|
||||
{
|
||||
set_except_vector(0, ev96100IRQ);
|
||||
mips_cpu_irq_init(0);
|
||||
}
|
||||
|
|
|
@ -6,6 +6,6 @@
|
|||
# Makefile for the Galileo EV64120 board.
|
||||
#
|
||||
|
||||
obj-y += int-handler.o irq.o promcon.o reset.o serialGT.o setup.o
|
||||
obj-y += irq.o promcon.o reset.o serialGT.o setup.o
|
||||
|
||||
EXTRA_AFLAGS := $(CFLAGS)
|
||||
|
|
|
@ -1,113 +0,0 @@
|
|||
/*
|
||||
* int-handler.S
|
||||
*
|
||||
* Based on the cobalt handler.
|
||||
*/
|
||||
#include <asm/asm.h>
|
||||
#include <asm/mipsregs.h>
|
||||
#include <asm/addrspace.h>
|
||||
#include <asm/regdef.h>
|
||||
#include <asm/stackframe.h>
|
||||
|
||||
/*
|
||||
* galileo_handle_int -
|
||||
* We check for the timer first, then check PCI ints A and D.
|
||||
* Then check for serial IRQ and fall through.
|
||||
*/
|
||||
.align 5
|
||||
.set reorder
|
||||
.set noat
|
||||
NESTED(galileo_handle_int, PT_SIZE, sp)
|
||||
SAVE_ALL
|
||||
CLI
|
||||
.set at
|
||||
mfc0 t0,CP0_CAUSE
|
||||
mfc0 t2,CP0_STATUS
|
||||
|
||||
and t0,t2
|
||||
|
||||
andi t1,t0,STATUSF_IP4 /* int2 hardware line (timer) */
|
||||
bnez t1,ll_gt64120_irq
|
||||
andi t1,t0,STATUSF_IP2 /* int0 hardware line */
|
||||
bnez t1,ll_pci_intA
|
||||
andi t1,t0,STATUSF_IP5 /* int3 hardware line */
|
||||
bnez t1,ll_pci_intD
|
||||
andi t1,t0,STATUSF_IP6 /* int4 hardware line */
|
||||
bnez t1,ll_serial_irq
|
||||
andi t1,t0,STATUSF_IP7 /* compare int */
|
||||
bnez t1,ll_compare_irq
|
||||
nop
|
||||
|
||||
/* wrong alarm or masked ... */
|
||||
j spurious_interrupt
|
||||
nop
|
||||
END(galileo_handle_int)
|
||||
|
||||
|
||||
.align 5
|
||||
.set reorder
|
||||
ll_gt64120_irq:
|
||||
li a0,4
|
||||
move a1,sp
|
||||
jal do_IRQ
|
||||
nop
|
||||
j ret_from_irq
|
||||
nop
|
||||
|
||||
.align 5
|
||||
.set reorder
|
||||
ll_compare_irq:
|
||||
li a0,7
|
||||
move a1,sp
|
||||
jal do_IRQ
|
||||
nop
|
||||
j ret_from_irq
|
||||
nop
|
||||
|
||||
.align 5
|
||||
.set reorder
|
||||
ll_pci_intA:
|
||||
move a0,sp
|
||||
jal pci_intA
|
||||
nop
|
||||
j ret_from_irq
|
||||
nop
|
||||
|
||||
#if 0
|
||||
.align 5
|
||||
.set reorder
|
||||
ll_pci_intB:
|
||||
move a0,sp
|
||||
jal pci_intB
|
||||
nop
|
||||
j ret_from_irq
|
||||
nop
|
||||
|
||||
.align 5
|
||||
.set reorder
|
||||
ll_pci_intC:
|
||||
move a0,sp
|
||||
jal pci_intC
|
||||
nop
|
||||
j ret_from_irq
|
||||
nop
|
||||
#endif
|
||||
|
||||
.align 5
|
||||
.set reorder
|
||||
ll_pci_intD:
|
||||
move a0,sp
|
||||
jal pci_intD
|
||||
nop
|
||||
j ret_from_irq
|
||||
nop
|
||||
|
||||
.align 5
|
||||
.set reorder
|
||||
ll_serial_irq:
|
||||
li a0,6
|
||||
move a1,sp
|
||||
jal do_IRQ
|
||||
nop
|
||||
j ret_from_irq
|
||||
nop
|
|
@ -46,14 +46,22 @@
|
|||
#include <asm/system.h>
|
||||
#include <asm/gt64120.h>
|
||||
|
||||
asmlinkage inline void pci_intA(struct pt_regs *regs)
|
||||
asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
|
||||
{
|
||||
do_IRQ(GT_INTA, regs);
|
||||
}
|
||||
unsigned int pending = read_c0_status() & read_c0_cause();
|
||||
|
||||
asmlinkage inline void pci_intD(struct pt_regs *regs)
|
||||
{
|
||||
do_IRQ(GT_INTD, regs);
|
||||
if (pending & STATUSF_IP4) /* int2 hardware line (timer) */
|
||||
do_IRQ(4, regs);
|
||||
else if (pending & STATUSF_IP2) /* int0 hardware line */
|
||||
do_IRQ(GT_INTA, regs);
|
||||
else if (pending & STATUSF_IP5) /* int3 hardware line */
|
||||
do_IRQ(GT_INTD, regs);
|
||||
else if (pending & STATUSF_IP6) /* int4 hardware line */
|
||||
do_IRQ(6, regs);
|
||||
else if (pending & STATUSF_IP7) /* compare int */
|
||||
do_IRQ(7, regs);
|
||||
else
|
||||
spurious_interrupt(regs);
|
||||
}
|
||||
|
||||
static void disable_ev64120_irq(unsigned int irq_nr)
|
||||
|
@ -109,16 +117,11 @@ static struct hw_interrupt_type ev64120_irq_type = {
|
|||
|
||||
void gt64120_irq_setup(void)
|
||||
{
|
||||
extern asmlinkage void galileo_handle_int(void);
|
||||
|
||||
/*
|
||||
* Clear all of the interrupts while we change the table around a bit.
|
||||
*/
|
||||
clear_c0_status(ST0_IM);
|
||||
|
||||
/* Sets the exception_handler array. */
|
||||
set_except_vector(0, galileo_handle_int);
|
||||
|
||||
local_irq_disable();
|
||||
|
||||
/*
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# Makefile for Momentum's Ocelot board.
|
||||
#
|
||||
|
||||
obj-y += int-handler.o irq.o prom.o reset.o setup.o
|
||||
obj-y += irq.o prom.o reset.o setup.o
|
||||
|
||||
obj-$(CONFIG_KGDB) += dbg_io.o
|
||||
|
||||
|
|
|
@ -1,131 +0,0 @@
|
|||
/*
|
||||
* Copyright 2001 MontaVista Software Inc.
|
||||
* Author: jsun@mvista.com or jsun@junsun.net
|
||||
*
|
||||
* First-level interrupt dispatcher for ocelot board.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*/
|
||||
#include <asm/asm.h>
|
||||
#include <asm/mipsregs.h>
|
||||
#include <asm/addrspace.h>
|
||||
#include <asm/regdef.h>
|
||||
#include <asm/stackframe.h>
|
||||
|
||||
/*
|
||||
* first level interrupt dispatcher for ocelot board -
|
||||
* We check for the timer first, then check PCI ints A and D.
|
||||
* Then check for serial IRQ and fall through.
|
||||
*/
|
||||
.align 5
|
||||
NESTED(ocelot_handle_int, PT_SIZE, sp)
|
||||
SAVE_ALL
|
||||
CLI
|
||||
.set at
|
||||
mfc0 t0, CP0_CAUSE
|
||||
mfc0 t2, CP0_STATUS
|
||||
|
||||
and t0, t2
|
||||
|
||||
andi t1, t0, STATUSF_IP2 /* int0 hardware line */
|
||||
bnez t1, ll_pri_enet_irq
|
||||
andi t1, t0, STATUSF_IP3 /* int1 hardware line */
|
||||
bnez t1, ll_sec_enet_irq
|
||||
andi t1, t0, STATUSF_IP4 /* int2 hardware line */
|
||||
bnez t1, ll_uart1_irq
|
||||
andi t1, t0, STATUSF_IP5 /* int3 hardware line */
|
||||
bnez t1, ll_cpci_irq
|
||||
andi t1, t0, STATUSF_IP6 /* int4 hardware line */
|
||||
bnez t1, ll_galileo_irq
|
||||
andi t1, t0, STATUSF_IP7 /* cpu timer */
|
||||
bnez t1, ll_cputimer_irq
|
||||
|
||||
/* now look at the extended interrupts */
|
||||
mfc0 t0, CP0_CAUSE
|
||||
cfc0 t1, CP0_S1_INTCONTROL
|
||||
|
||||
/* shift the mask 8 bits left to line up the bits */
|
||||
sll t2, t1, 8
|
||||
|
||||
and t0, t2
|
||||
srl t0, t0, 16
|
||||
|
||||
andi t1, t0, STATUSF_IP8 /* int6 hardware line */
|
||||
bnez t1, ll_pmc1_irq
|
||||
andi t1, t0, STATUSF_IP9 /* int7 hardware line */
|
||||
bnez t1, ll_pmc2_irq
|
||||
andi t1, t0, STATUSF_IP10 /* int8 hardware line */
|
||||
bnez t1, ll_cpci_abcd_irq
|
||||
andi t1, t0, STATUSF_IP11 /* int9 hardware line */
|
||||
bnez t1, ll_uart2_irq
|
||||
|
||||
.set reorder
|
||||
|
||||
/* wrong alarm or masked ... */
|
||||
j spurious_interrupt
|
||||
nop
|
||||
END(ocelot_handle_int)
|
||||
|
||||
.align 5
|
||||
ll_pri_enet_irq:
|
||||
li a0, 2
|
||||
move a1, sp
|
||||
jal do_IRQ
|
||||
j ret_from_irq
|
||||
|
||||
ll_sec_enet_irq:
|
||||
li a0, 3
|
||||
move a1, sp
|
||||
jal do_IRQ
|
||||
j ret_from_irq
|
||||
|
||||
ll_uart1_irq:
|
||||
li a0, 4
|
||||
move a1, sp
|
||||
jal do_IRQ
|
||||
j ret_from_irq
|
||||
|
||||
ll_cpci_irq:
|
||||
li a0, 5
|
||||
move a1, sp
|
||||
jal do_IRQ
|
||||
j ret_from_irq
|
||||
|
||||
ll_galileo_irq:
|
||||
li a0, 6
|
||||
move a1, sp
|
||||
jal do_IRQ
|
||||
j ret_from_irq
|
||||
|
||||
ll_cputimer_irq:
|
||||
li a0, 7
|
||||
move a1, sp
|
||||
jal do_IRQ
|
||||
j ret_from_irq
|
||||
|
||||
ll_pmc1_irq:
|
||||
li a0, 8
|
||||
move a1, sp
|
||||
jal do_IRQ
|
||||
j ret_from_irq
|
||||
|
||||
ll_pmc2_irq:
|
||||
li a0, 9
|
||||
move a1, sp
|
||||
jal do_IRQ
|
||||
j ret_from_irq
|
||||
|
||||
ll_cpci_abcd_irq:
|
||||
li a0, 10
|
||||
move a1, sp
|
||||
jal do_IRQ
|
||||
j ret_from_irq
|
||||
|
||||
ll_uart2_irq:
|
||||
li a0, 11
|
||||
move a1, sp
|
||||
jal do_IRQ
|
||||
j ret_from_irq
|
|
@ -48,7 +48,38 @@
|
|||
#include <asm/mipsregs.h>
|
||||
#include <asm/system.h>
|
||||
|
||||
extern asmlinkage void ocelot_handle_int(void);
|
||||
asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
|
||||
{
|
||||
unsigned int pending = read_c0_status() & read_c0_cause();
|
||||
|
||||
if (pending & STATUSF_IP2) /* int0 hardware line */
|
||||
do_IRQ(2, regs);
|
||||
else if (pending & STATUSF_IP3) /* int1 hardware line */
|
||||
do_IRQ(3, regs);
|
||||
else if (pending & STATUSF_IP4) /* int2 hardware line */
|
||||
do_IRQ(4, regs);
|
||||
else if (pending & STATUSF_IP5) /* int3 hardware line */
|
||||
do_IRQ(5, regs);
|
||||
else if (pending & STATUSF_IP6) /* int4 hardware line */
|
||||
do_IRQ(6, regs);
|
||||
else if (pending & STATUSF_IP7) /* cpu timer */
|
||||
do_IRQ(7, regs);
|
||||
else {
|
||||
/*
|
||||
* Now look at the extended interrupts
|
||||
*/
|
||||
pending = (read_c0_cause() & (read_c0_intcontrol() << 8)) >> 16;
|
||||
|
||||
if (pending & STATUSF_IP8) /* int6 hardware line */
|
||||
do_IRQ(8, regs);
|
||||
else if (pending & STATUSF_IP9) /* int7 hardware line */
|
||||
do_IRQ(9, regs);
|
||||
else if (pending & STATUSF_IP10) /* int8 hardware line */
|
||||
do_IRQ(10, regs);
|
||||
else if (pending & STATUSF_IP11) /* int9 hardware line */
|
||||
do_IRQ(11, regs);
|
||||
}
|
||||
}
|
||||
|
||||
void __init arch_init_irq(void)
|
||||
{
|
||||
|
@ -59,9 +90,6 @@ void __init arch_init_irq(void)
|
|||
clear_c0_status(ST0_IM);
|
||||
local_irq_disable();
|
||||
|
||||
/* Sets the first-level interrupt dispatcher. */
|
||||
set_except_vector(0, ocelot_handle_int);
|
||||
|
||||
mips_cpu_irq_init(0);
|
||||
rm7k_cpu_irq_init(8);
|
||||
}
|
||||
|
|
|
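The extended-interrupt path above lines the IntControl mask up with the Cause pending bits by shifting it before the AND. The standalone sketch below shows only that alignment trick; the bit positions are invented for the example and are not the real RM7000 register layout.

#include <stdio.h>

/*
 * Assumed layout: pending bits live at bits 16..23 of 'cause', their
 * enable bits at bits 8..15 of 'intcontrol'.  Shifting the enables left
 * by 8 lines the two fields up, so a single AND keeps only the sources
 * that are both pending and enabled.
 */
static unsigned int extended_pending(unsigned int cause, unsigned int intcontrol)
{
        return (cause & (intcontrol << 8)) >> 16;   /* result in bits 0..7 */
}

int main(void)
{
        unsigned int cause      = (1u << 18) | (1u << 20); /* sources 2 and 4 raised */
        unsigned int intcontrol = (1u << 10);              /* only source 2 enabled  */

        printf("pending mask = 0x%02x\n", extended_pending(cause, intcontrol));
        /* prints 0x04: only source 2 survives the mask */
        return 0;
}
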
@ -6,7 +6,7 @@
|
|||
# Makefile for the ITE 8172 (qed-4n-s01b) board, generic files.
|
||||
#
|
||||
|
||||
obj-y += it8172_setup.o irq.o int-handler.o pmon_prom.o \
|
||||
obj-y += it8172_setup.o irq.o pmon_prom.o \
|
||||
time.o lpc.o puts.o reset.o
|
||||
|
||||
obj-$(CONFIG_IT8172_CIR)+= it8172_cir.o
|
||||
|
|
|
@ -1,63 +0,0 @@
|
|||
#include <asm/asm.h>
|
||||
#include <asm/mipsregs.h>
|
||||
#include <asm/regdef.h>
|
||||
#include <asm/stackframe.h>
|
||||
|
||||
.text
|
||||
.set macro
|
||||
.set noat
|
||||
.align 5
|
||||
|
||||
NESTED(it8172_IRQ, PT_SIZE, sp)
|
||||
SAVE_ALL
|
||||
CLI # Important: mark KERNEL mode !
|
||||
|
||||
/* We're working with 'reorder' set at this point. */
|
||||
/*
|
||||
* Get pending interrupts
|
||||
*/
|
||||
|
||||
mfc0 t0,CP0_CAUSE # get pending interrupts
|
||||
mfc0 t1,CP0_STATUS # get enabled interrupts
|
||||
and t0,t1 # isolate allowed ones
|
||||
|
||||
andi t0,0xff00 # isolate pending bits
|
||||
beqz t0, 3f # spurious interrupt
|
||||
|
||||
andi a0, t0, CAUSEF_IP7
|
||||
beq a0, zero, 1f
|
||||
|
||||
li a0, 127 # MIPS_CPU_TIMER_IRQ = (NR_IRQS-1)
|
||||
move a1, sp
|
||||
jal ll_timer_interrupt
|
||||
j ret_from_irq
|
||||
nop
|
||||
|
||||
1:
|
||||
andi a0, t0, CAUSEF_IP2 # the only int we expect at this time
|
||||
beq a0, zero, 3f
|
||||
move a0,sp
|
||||
jal it8172_hw0_irqdispatch
|
||||
|
||||
mfc0 t0,CP0_STATUS # disable interrupts
|
||||
ori t0,1
|
||||
xori t0,1
|
||||
mtc0 t0,CP0_STATUS
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
|
||||
la a1, ret_from_irq
|
||||
jr a1
|
||||
nop
|
||||
|
||||
3:
|
||||
move a0, sp
|
||||
jal mips_spurious_interrupt
|
||||
nop
|
||||
la a1, ret_from_irq
|
||||
jr a1
|
||||
nop
|
||||
|
||||
END(it8172_IRQ)
|
||||
|
|
@ -62,12 +62,8 @@
|
|||
|
||||
#define ALLINTS_NOTIMER (IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4)
|
||||
|
||||
void disable_it8172_irq(unsigned int irq_nr);
|
||||
void enable_it8172_irq(unsigned int irq_nr);
|
||||
|
||||
extern void set_debug_traps(void);
|
||||
extern void mips_timer_interrupt(int irq, struct pt_regs *regs);
|
||||
extern asmlinkage void it8172_IRQ(void);
|
||||
|
||||
struct it8172_intc_regs volatile *it8172_hw0_icregs =
|
||||
(struct it8172_intc_regs volatile *)(KSEG1ADDR(IT8172_PCI_IO_BASE + IT_INTC_BASE));
|
||||
|
@ -181,8 +177,6 @@ void __init arch_init_irq(void)
|
|||
int i;
|
||||
unsigned long flags;
|
||||
|
||||
set_except_vector(0, it8172_IRQ);
|
||||
|
||||
/* mask all interrupts */
|
||||
it8172_hw0_icregs->lb_mask = 0xffff;
|
||||
it8172_hw0_icregs->lpc_mask = 0xffff;
|
||||
|
@ -282,6 +276,18 @@ void it8172_hw0_irqdispatch(struct pt_regs *regs)
|
|||
do_IRQ(irq, regs);
|
||||
}
|
||||
|
||||
asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
|
||||
{
|
||||
unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
|
||||
|
||||
if (!pending)
|
||||
mips_spurious_interrupt(regs);
|
||||
else if (pending & CAUSEF_IP7)
|
||||
ll_timer_interrupt(127, regs);
|
||||
else if (pending & CAUSEF_IP2)
|
||||
it8172_hw0_irqdispatch(regs);
|
||||
}
|
||||
|
||||
void show_pending_irqs(void)
|
||||
{
|
||||
fputs("intstatus: ");
|
||||
|
|
|
@ -29,6 +29,7 @@
|
|||
#include <linux/sched.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/mc146818rtc.h>
|
||||
|
||||
#include <asm/time.h>
|
||||
#include <asm/mipsregs.h>
|
||||
|
|
|
@ -34,13 +34,13 @@
|
|||
#include <asm/bootinfo.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/it8172/it8172.h>
|
||||
#include <asm/it8172/it8172_dbg.h>
|
||||
|
||||
int prom_argc;
|
||||
char **prom_argv, **prom_envp;
|
||||
|
||||
extern char _end;
|
||||
extern void __init prom_init_cmdline(void);
|
||||
extern unsigned long __init prom_get_memsize(void);
|
||||
extern void __init it8172_init_ram_resource(unsigned long memsize);
|
||||
|
|
|
@ -34,13 +34,13 @@
|
|||
#include <asm/bootinfo.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/it8172/it8172.h>
|
||||
#include <asm/it8172/it8172_dbg.h>
|
||||
|
||||
int prom_argc;
|
||||
char **prom_argv, **prom_envp;
|
||||
|
||||
extern char _end;
|
||||
extern void __init prom_init_cmdline(void);
|
||||
extern unsigned long __init prom_get_memsize(void);
|
||||
extern void __init it8172_init_ram_resource(unsigned long memsize);
|
||||
|
|
|
@ -2,6 +2,6 @@
|
|||
# Makefile for the Jazz family specific parts of the kernel
|
||||
#
|
||||
|
||||
obj-y := int-handler.o irq.o jazzdma.o reset.o setup.o
|
||||
obj-y := irq.o jazzdma.o reset.o setup.o
|
||||
|
||||
EXTRA_AFLAGS := $(CFLAGS)
|
||||
|
|
|
@ -1,282 +0,0 @@
|
|||
/*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle and Andreas Busse
|
||||
*
|
||||
* Jazz family specific interrupt stuff
|
||||
*
|
||||
* To do: On Jazz machines we remap some non-ISA interrupts to ISA
|
||||
* interrupts. These interrupts should use their own vectors.
|
||||
* Squeeze the last cycles out of the handlers. Only a dead
|
||||
* cycle is a good cycle.
|
||||
*/
|
||||
#include <asm/asm.h>
|
||||
#include <asm/mipsregs.h>
|
||||
#include <asm/jazz.h>
|
||||
#include <asm/regdef.h>
|
||||
#include <asm/stackframe.h>
|
||||
|
||||
/*
|
||||
* jazz_handle_int: Interrupt handler for the ACER Pica-61 boards
|
||||
*/
|
||||
.set noreorder
|
||||
|
||||
NESTED(jazz_handle_int, PT_SIZE, ra)
|
||||
.set noat
|
||||
SAVE_ALL
|
||||
CLI
|
||||
.set at
|
||||
|
||||
/*
|
||||
* Get pending interrupts
|
||||
*/
|
||||
mfc0 t0,CP0_CAUSE # get pending interrupts
|
||||
mfc0 t1,CP0_STATUS # get enabled interrupts
|
||||
and t0,t1 # isolate allowed ones
|
||||
andi t0,0xff00 # isolate pending bits
|
||||
beqz t0,3f
|
||||
sll t0,16 # delay slot
|
||||
|
||||
/*
|
||||
* Find irq with highest priority
|
||||
* FIXME: This is slow - use binary search
|
||||
*/
|
||||
la t1,ll_vectors
|
||||
1: bltz t0,2f # found pending irq
|
||||
sll t0,1
|
||||
b 1b
|
||||
subu t1,PTRSIZE # delay slot
|
||||
|
||||
/*
|
||||
* Do the low-level stuff
|
||||
*/
|
||||
2: lw t0,(t1)
|
||||
jr t0
|
||||
nop # delay slot
|
||||
END(jazz_handle_int)
|
||||
|
||||
ll_sw0: li s1,~IE_SW0
|
||||
mfc0 t0,CP0_CAUSE
|
||||
and t0,s1
|
||||
mtc0 t0,CP0_CAUSE
|
||||
PANIC("Unimplemented sw0 handler")
|
||||
|
||||
ll_sw1: li s1,~IE_SW1
|
||||
mfc0 t0,CP0_CAUSE
|
||||
and t0,s1
|
||||
mtc0 t0,CP0_CAUSE
|
||||
PANIC("Unimplemented sw1 handler")
|
||||
|
||||
ll_local_dma: li s1,~IE_IRQ0
|
||||
PANIC("Unimplemented local_dma handler")
|
||||
|
||||
ll_local_dev: lbu t0,JAZZ_IO_IRQ_SOURCE
|
||||
#if PTRSIZE == 8 /* True 64 bit kernel */
|
||||
dsll t0,1
|
||||
#endif
|
||||
.set reorder
|
||||
LONG_L t0,local_vector(t0)
|
||||
jr t0
|
||||
.set noreorder
|
||||
|
||||
/*
|
||||
* The braindead PICA hardware gives us no way to distinguish if we really
|
||||
* received interrupt 7 from the (E)ISA bus or if we just received an
|
||||
* interrupt with no findable cause. This sometimes happens with braindead
|
||||
* cards. Oh well - for all the Jazz boxes slots are more or less just
|
||||
* whistles and bells and we're aware of the problem.
|
||||
*/
|
||||
ll_isa_irq: lw a0, JAZZ_EISA_IRQ_ACK
|
||||
|
||||
jal do_IRQ
|
||||
move a1,sp
|
||||
|
||||
j ret_from_irq
|
||||
nop
|
||||
|
||||
/*
|
||||
* Hmm... This is not just a plain PC clone so the question is
|
||||
* which devices on Jazz machines can generate an (E)ISA NMI?
|
||||
* (Writing to nonexistent memory?)
|
||||
*/
|
||||
ll_isa_nmi: li s1,~IE_IRQ3
|
||||
PANIC("Unimplemented isa_nmi handler")
|
||||
|
||||
/*
|
||||
* Timer IRQ - remapped to be more similar to an IBM compatible.
|
||||
*
|
||||
* The timer interrupt is handled specially to ensure that the jiffies
|
||||
* variable is updated at all times. Specifically, the timer interrupt is
|
||||
* just like the complete handlers except that it is invoked with interrupts
|
||||
* disabled and should never re-enable them. If other interrupts were
|
||||
* allowed to be processed while the timer interrupt is active, then the
|
||||
* other interrupts would have to avoid using the jiffies variable for delay
|
||||
* and interval timing operations to avoid hanging the system.
|
||||
*/
|
||||
ll_timer: lw zero,JAZZ_TIMER_REGISTER # timer irq cleared on read
|
||||
li s1,~IE_IRQ4
|
||||
|
||||
li a0, JAZZ_TIMER_IRQ
|
||||
jal do_IRQ
|
||||
move a1,sp
|
||||
|
||||
mfc0 t0,CP0_STATUS # disable interrupts again
|
||||
ori t0,1
|
||||
xori t0,1
|
||||
mtc0 t0,CP0_STATUS
|
||||
|
||||
j ret_from_irq
|
||||
nop
|
||||
|
||||
/*
|
||||
* CPU count/compare IRQ (unused)
|
||||
*/
|
||||
ll_count: j ret_from_irq
|
||||
mtc0 zero,CP0_COMPARE
|
||||
|
||||
#if 0
|
||||
/*
|
||||
* Call the handler for the interrupt
|
||||
* (Currently unused)
|
||||
*/
|
||||
call_real: /*
|
||||
* temporarily disable interrupt
|
||||
*/
|
||||
mfc0 t2,CP0_STATUS
|
||||
and t2,s1
|
||||
mtc0 t2,CP0_STATUS
|
||||
nor s1,zero,s1
|
||||
jal do_IRQ
|
||||
|
||||
/*
|
||||
* reenable interrupt
|
||||
*/
|
||||
mfc0 t2,CP0_STATUS
|
||||
or t2,s1
|
||||
mtc0 t2,CP0_STATUS
|
||||
j ret_from_irq
|
||||
#endif
|
||||
|
||||
.data
|
||||
PTR ll_sw0 # SW0
|
||||
PTR ll_sw1 # SW1
|
||||
PTR ll_local_dma # Local DMA
|
||||
PTR ll_local_dev # Local devices
|
||||
PTR ll_isa_irq # ISA IRQ
|
||||
PTR ll_isa_nmi # ISA NMI
|
||||
PTR ll_timer # Timer
|
||||
ll_vectors: PTR ll_count # Count/Compare IRQ
|
||||
|
||||
/*
|
||||
* Interrupt handlers for local devices.
|
||||
*/
|
||||
.text
|
||||
.set reorder
|
||||
loc_no_irq: PANIC("Unimplemented loc_no_irq handler")
|
||||
/*
|
||||
* Parallel port IRQ
|
||||
*/
|
||||
loc_parallel: li s1,~JAZZ_IE_PARALLEL
|
||||
li a0,JAZZ_PARALLEL_IRQ
|
||||
b loc_call
|
||||
|
||||
/*
|
||||
* Floppy IRQ
|
||||
*/
|
||||
loc_floppy: li s1,~JAZZ_IE_FLOPPY
|
||||
li a0,JAZZ_FLOPPY_IRQ
|
||||
b loc_call
|
||||
|
||||
/*
|
||||
* Sound IRQ
|
||||
*/
|
||||
loc_sound: PANIC("Unimplemented loc_sound handler")
|
||||
loc_video: PANIC("Unimplemented loc_video handler")
|
||||
|
||||
/*
|
||||
* Ethernet interrupt handler
|
||||
*/
|
||||
loc_ethernet: li s1,~JAZZ_IE_ETHERNET
|
||||
li a0,JAZZ_ETHERNET_IRQ
|
||||
b loc_call
|
||||
|
||||
/*
|
||||
* SCSI interrupt handler
|
||||
*/
|
||||
loc_scsi: li s1,~JAZZ_IE_SCSI
|
||||
li a0,JAZZ_SCSI_IRQ
|
||||
b loc_call
|
||||
|
||||
/*
|
||||
* Keyboard interrupt handler
|
||||
*/
|
||||
loc_keyboard: li s1,~JAZZ_IE_KEYBOARD
|
||||
li a0,JAZZ_KEYBOARD_IRQ
|
||||
b loc_call
|
||||
|
||||
/*
|
||||
* Mouse interrupt handler
|
||||
*/
|
||||
loc_mouse: li s1,~JAZZ_IE_MOUSE
|
||||
li a0,JAZZ_MOUSE_IRQ
|
||||
b loc_call
|
||||
|
||||
/*
|
||||
* Serial port 1 IRQ
|
||||
*/
|
||||
loc_serial1: li s1,~JAZZ_IE_SERIAL1
|
||||
li a0,JAZZ_SERIAL1_IRQ
|
||||
b loc_call
|
||||
|
||||
/*
|
||||
* Serial port 2 IRQ
|
||||
*/
|
||||
loc_serial2: li s1,~JAZZ_IE_SERIAL2
|
||||
li a0,JAZZ_SERIAL2_IRQ
|
||||
b loc_call
|
||||
|
||||
/*
|
||||
* Call the interrupt handler for an interrupt generated by a
|
||||
* local device.
|
||||
*/
|
||||
loc_call: /*
|
||||
* Temporarily disable interrupt source
|
||||
*/
|
||||
lhu t2,JAZZ_IO_IRQ_ENABLE
|
||||
and t2,s1
|
||||
sh t2,JAZZ_IO_IRQ_ENABLE
|
||||
|
||||
nor s1,zero,s1
|
||||
jal do_IRQ
|
||||
|
||||
/*
|
||||
* Reenable interrupt
|
||||
*/
|
||||
lhu t2,JAZZ_IO_IRQ_ENABLE
|
||||
or t2,s1
|
||||
sh t2,JAZZ_IO_IRQ_ENABLE
|
||||
|
||||
j ret_from_irq
|
||||
|
||||
/*
|
||||
* "Jump extender" to reach spurious_interrupt
|
||||
*/
|
||||
3: j spurious_interrupt
|
||||
|
||||
/*
|
||||
* Vectors for interrupts generated by local devices
|
||||
*/
|
||||
.data
|
||||
local_vector: PTR loc_no_irq
|
||||
PTR loc_parallel
|
||||
PTR loc_floppy
|
||||
PTR loc_sound
|
||||
PTR loc_video
|
||||
PTR loc_ethernet
|
||||
PTR loc_scsi
|
||||
PTR loc_keyboard
|
||||
PTR loc_mouse
|
||||
PTR loc_serial1
|
||||
PTR loc_serial2
|
|
@ -15,8 +15,6 @@
|
|||
#include <asm/io.h>
|
||||
#include <asm/jazz.h>
|
||||
|
||||
extern asmlinkage void jazz_handle_int(void);
|
||||
|
||||
static DEFINE_SPINLOCK(r4030_lock);
|
||||
|
||||
static void enable_r4030_irq(unsigned int irq)
|
||||
|
@ -90,10 +88,82 @@ void __init init_r4030_ints(void)
|
|||
*/
|
||||
void __init arch_init_irq(void)
|
||||
{
|
||||
set_except_vector(0, jazz_handle_int);
|
||||
|
||||
init_i8259_irqs(); /* Integrated i8259 */
|
||||
init_r4030_ints();
|
||||
|
||||
change_c0_status(ST0_IM, IE_IRQ4 | IE_IRQ3 | IE_IRQ2 | IE_IRQ1);
|
||||
}
|
||||
|
||||
static void loc_call(unsigned int irq, struct pt_regs *regs, unsigned int mask)
|
||||
{
|
||||
r4030_write_reg16(JAZZ_IO_IRQ_ENABLE,
|
||||
r4030_read_reg16(JAZZ_IO_IRQ_ENABLE) & mask);
|
||||
do_IRQ(irq, regs);
|
||||
r4030_write_reg16(JAZZ_IO_IRQ_ENABLE,
|
||||
r4030_read_reg16(JAZZ_IO_IRQ_ENABLE) | mask);
|
||||
}
|
||||
|
||||
static void ll_local_dev(struct pt_regs *regs)
|
||||
{
|
||||
switch (r4030_read_reg32(JAZZ_IO_IRQ_SOURCE)) {
|
||||
case 0:
|
||||
panic("Unimplemented loc_no_irq handler");
|
||||
break;
|
||||
case 4:
|
||||
loc_call(JAZZ_PARALLEL_IRQ, regs, JAZZ_IE_PARALLEL);
|
||||
break;
|
||||
case 8:
|
||||
loc_call(JAZZ_FLOPPY_IRQ, regs, JAZZ_IE_FLOPPY);
|
||||
break;
|
||||
case 12:
|
||||
panic("Unimplemented loc_sound handler");
|
||||
break;
|
||||
case 16:
|
||||
panic("Unimplemented loc_video handler");
|
||||
break;
|
||||
case 20:
|
||||
loc_call(JAZZ_ETHERNET_IRQ, regs, JAZZ_IE_ETHERNET);
|
||||
break;
|
||||
case 24:
|
||||
loc_call(JAZZ_SCSI_IRQ, regs, JAZZ_IE_SCSI);
|
||||
break;
|
||||
case 28:
|
||||
loc_call(JAZZ_KEYBOARD_IRQ, regs, JAZZ_IE_KEYBOARD);
|
||||
break;
|
||||
case 32:
|
||||
loc_call(JAZZ_MOUSE_IRQ, regs, JAZZ_IE_MOUSE);
|
||||
break;
|
||||
case 36:
|
||||
loc_call(JAZZ_SERIAL1_IRQ, regs, JAZZ_IE_SERIAL1);
|
||||
break;
|
||||
case 40:
|
||||
loc_call(JAZZ_SERIAL2_IRQ, regs, JAZZ_IE_SERIAL2);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
|
||||
{
|
||||
unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
|
||||
|
||||
if (pending & IE_IRQ5)
|
||||
write_c0_compare(0);
|
||||
else if (pending & IE_IRQ4) {
|
||||
r4030_read_reg32(JAZZ_TIMER_REGISTER);
|
||||
do_IRQ(JAZZ_TIMER_IRQ, regs);
|
||||
} else if (pending & IE_IRQ3)
|
||||
panic("Unimplemented ISA NMI handler");
|
||||
else if (pending & IE_IRQ2)
|
||||
do_IRQ(r4030_read_reg32(JAZZ_EISA_IRQ_ACK), regs);
|
||||
else if (pending & IE_IRQ1) {
|
||||
ll_local_dev(regs);
|
||||
} else if (unlikely(pending & IE_IRQ0))
|
||||
panic("Unimplemented local_dma handler");
|
||||
else if (pending & IE_SW1) {
|
||||
clear_c0_cause(IE_SW1);
|
||||
panic("Unimplemented sw1 handler");
|
||||
} else if (pending & IE_SW0) {
|
||||
clear_c0_cause(IE_SW0);
|
||||
panic("Unimplemented sw0 handler");
|
||||
}
|
||||
}
|
||||
|
|
|
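The removed int-handler.S indexed its local_vector pointer table directly with the value read from JAZZ_IO_IRQ_SOURCE, which advances in steps of four; the new C code writes the same mapping out as a switch. A minimal sketch of the table-driven form, with hypothetical handler names:

#include <stdio.h>

/* Hypothetical handlers standing in for the per-device IRQ calls. */
static void no_irq(void)   { printf("no local irq\n"); }
static void parallel(void) { printf("parallel\n");     }
static void floppy(void)   { printf("floppy\n");       }

/*
 * The source register reports 0, 4, 8, ... so dividing by the pointer
 * stride turns it straight into a table index, just as the removed
 * assembly did with its local_vector table.
 */
static void (*const local_vector[])(void) = { no_irq, parallel, floppy };

static void ll_local_dev(unsigned int source)
{
        unsigned int idx = source / 4;

        if (idx < sizeof(local_vector) / sizeof(local_vector[0]))
                local_vector[idx]();
}

int main(void)
{
        ll_local_dev(4);   /* -> parallel */
        ll_local_dev(8);   /* -> floppy   */
        return 0;
}
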
@ -60,15 +60,15 @@ rtc_ds1742_get_time(void)
|
|||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&rtc_lock, flags);
|
||||
CMOS_WRITE(RTC_READ, RTC_CONTROL);
|
||||
second = BCD2BIN(CMOS_READ(RTC_SECONDS) & RTC_SECONDS_MASK);
|
||||
minute = BCD2BIN(CMOS_READ(RTC_MINUTES));
|
||||
hour = BCD2BIN(CMOS_READ(RTC_HOURS));
|
||||
day = BCD2BIN(CMOS_READ(RTC_DATE));
|
||||
month = BCD2BIN(CMOS_READ(RTC_MONTH));
|
||||
year = BCD2BIN(CMOS_READ(RTC_YEAR));
|
||||
century = BCD2BIN(CMOS_READ(RTC_CENTURY) & RTC_CENTURY_MASK);
|
||||
CMOS_WRITE(0, RTC_CONTROL);
|
||||
rtc_write(RTC_READ, RTC_CONTROL);
|
||||
second = BCD2BIN(rtc_read(RTC_SECONDS) & RTC_SECONDS_MASK);
|
||||
minute = BCD2BIN(rtc_read(RTC_MINUTES));
|
||||
hour = BCD2BIN(rtc_read(RTC_HOURS));
|
||||
day = BCD2BIN(rtc_read(RTC_DATE));
|
||||
month = BCD2BIN(rtc_read(RTC_MONTH));
|
||||
year = BCD2BIN(rtc_read(RTC_YEAR));
|
||||
century = BCD2BIN(rtc_read(RTC_CENTURY) & RTC_CENTURY_MASK);
|
||||
rtc_write(0, RTC_CONTROL);
|
||||
spin_unlock_irqrestore(&rtc_lock, flags);
|
||||
|
||||
year += century * 100;
|
||||
|
@ -87,16 +87,16 @@ rtc_ds1742_set_time(unsigned long t)
|
|||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&rtc_lock, flags);
|
||||
CMOS_WRITE(RTC_READ, RTC_CONTROL);
|
||||
cmos_second = (u8)(CMOS_READ(RTC_SECONDS) & RTC_SECONDS_MASK);
|
||||
cmos_minute = (u8)CMOS_READ(RTC_MINUTES);
|
||||
cmos_hour = (u8)CMOS_READ(RTC_HOURS);
|
||||
cmos_day = (u8)CMOS_READ(RTC_DATE);
|
||||
cmos_month = (u8)CMOS_READ(RTC_MONTH);
|
||||
cmos_year = (u8)CMOS_READ(RTC_YEAR);
|
||||
cmos_century = CMOS_READ(RTC_CENTURY) & RTC_CENTURY_MASK;
|
||||
rtc_write(RTC_READ, RTC_CONTROL);
|
||||
cmos_second = (u8)(rtc_read(RTC_SECONDS) & RTC_SECONDS_MASK);
|
||||
cmos_minute = (u8)rtc_read(RTC_MINUTES);
|
||||
cmos_hour = (u8)rtc_read(RTC_HOURS);
|
||||
cmos_day = (u8)rtc_read(RTC_DATE);
|
||||
cmos_month = (u8)rtc_read(RTC_MONTH);
|
||||
cmos_year = (u8)rtc_read(RTC_YEAR);
|
||||
cmos_century = rtc_read(RTC_CENTURY) & RTC_CENTURY_MASK;
|
||||
|
||||
CMOS_WRITE(RTC_WRITE, RTC_CONTROL);
|
||||
rtc_write(RTC_WRITE, RTC_CONTROL);
|
||||
|
||||
/* convert */
|
||||
to_tm(t, &tm);
|
||||
|
@ -104,18 +104,18 @@ rtc_ds1742_set_time(unsigned long t)
|
|||
/* check each field one by one */
|
||||
year = BIN2BCD(tm.tm_year - EPOCH);
|
||||
if (year != cmos_year) {
|
||||
CMOS_WRITE(year,RTC_YEAR);
|
||||
rtc_write(year,RTC_YEAR);
|
||||
}
|
||||
|
||||
month = BIN2BCD(tm.tm_mon);
|
||||
if (month != (cmos_month & 0x1f)) {
|
||||
CMOS_WRITE((month & 0x1f) | (cmos_month & ~0x1f),RTC_MONTH);
|
||||
rtc_write((month & 0x1f) | (cmos_month & ~0x1f),RTC_MONTH);
|
||||
}
|
||||
|
||||
day = BIN2BCD(tm.tm_mday);
|
||||
if (day != cmos_day) {
|
||||
|
||||
CMOS_WRITE(day, RTC_DATE);
|
||||
rtc_write(day, RTC_DATE);
|
||||
}
|
||||
|
||||
if (cmos_hour & 0x40) {
|
||||
|
@ -130,20 +130,20 @@ rtc_ds1742_set_time(unsigned long t)
|
|||
/* 24 hour format */
|
||||
hour = BIN2BCD(tm.tm_hour) & 0x3f;
|
||||
}
|
||||
if (hour != cmos_hour) CMOS_WRITE(hour, RTC_HOURS);
|
||||
if (hour != cmos_hour) rtc_write(hour, RTC_HOURS);
|
||||
|
||||
minute = BIN2BCD(tm.tm_min);
|
||||
if (minute != cmos_minute) {
|
||||
CMOS_WRITE(minute, RTC_MINUTES);
|
||||
rtc_write(minute, RTC_MINUTES);
|
||||
}
|
||||
|
||||
second = BIN2BCD(tm.tm_sec);
|
||||
if (second != cmos_second) {
|
||||
CMOS_WRITE(second & RTC_SECONDS_MASK,RTC_SECONDS);
|
||||
rtc_write(second & RTC_SECONDS_MASK,RTC_SECONDS);
|
||||
}
|
||||
|
||||
/* RTC_CENTURY and RTC_CONTROL share same address... */
|
||||
CMOS_WRITE(cmos_century, RTC_CONTROL);
|
||||
rtc_write(cmos_century, RTC_CONTROL);
|
||||
spin_unlock_irqrestore(&rtc_lock, flags);
|
||||
|
||||
return 0;
|
||||
|
@ -163,9 +163,9 @@ rtc_ds1742_init(unsigned long base)
|
|||
rtc_mips_set_time = rtc_ds1742_set_time;
|
||||
|
||||
/* clear oscillator stop bit */
|
||||
CMOS_WRITE(RTC_READ, RTC_CONTROL);
|
||||
cmos_second = (u8)(CMOS_READ(RTC_SECONDS) & RTC_SECONDS_MASK);
|
||||
CMOS_WRITE(RTC_WRITE, RTC_CONTROL);
|
||||
CMOS_WRITE(cmos_second, RTC_SECONDS); /* clear msb */
|
||||
CMOS_WRITE(0, RTC_CONTROL);
|
||||
rtc_write(RTC_READ, RTC_CONTROL);
|
||||
cmos_second = (u8)(rtc_read(RTC_SECONDS) & RTC_SECONDS_MASK);
|
||||
rtc_write(RTC_WRITE, RTC_CONTROL);
|
||||
rtc_write(cmos_second, RTC_SECONDS); /* clear msb */
|
||||
rtc_write(0, RTC_CONTROL);
|
||||
}
|
||||
|
|
|
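The rtc_read()/rtc_write() conversion above leaves the BCD handling untouched: every DS1742 field is stored as packed BCD and run through BCD2BIN()/BIN2BCD(). As a quick standalone illustration of those two conversions (local helpers, not the kernel macros):

#include <stdio.h>

/* Packed BCD: high nibble = tens digit, low nibble = ones digit. */
static unsigned int bcd2bin(unsigned int bcd)
{
        return (bcd >> 4) * 10 + (bcd & 0x0f);
}

static unsigned int bin2bcd(unsigned int bin)
{
        return ((bin / 10) << 4) | (bin % 10);
}

int main(void)
{
        printf("0x%02x -> %u\n", 0x59, bcd2bin(0x59));  /* 59 seconds */
        printf("%u -> 0x%02x\n", 37, bin2bcd(37));      /* 0x37       */
        return 0;
}
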
@ -2,7 +2,7 @@
|
|||
# Makefile for TOSHIBA JMR-TX3927 board
|
||||
#
|
||||
|
||||
obj-y += init.o int-handler.o irq.o setup.o
|
||||
obj-y += init.o irq.o setup.o
|
||||
obj-$(CONFIG_RUNTIME_DEBUG) += debug.o
|
||||
obj-$(CONFIG_KGDB) += kgdb_io.o
|
||||
|
||||
|
|
|
@ -1,74 +0,0 @@
|
|||
/*
|
||||
* Copyright 2001 MontaVista Software Inc.
|
||||
* Author: MontaVista Software, Inc.
|
||||
* ahennessy@mvista.com
|
||||
*
|
||||
* Based on arch/mips/tsdb/kernel/int-handler.S
|
||||
*
|
||||
* Copyright (C) 2000-2001 Toshiba Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
|
||||
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
|
||||
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
|
||||
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along
|
||||
* with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
* 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*/
|
||||
|
||||
#include <asm/asm.h>
|
||||
#include <asm/mipsregs.h>
|
||||
#include <asm/regdef.h>
|
||||
#include <asm/stackframe.h>
|
||||
#include <asm/jmr3927/jmr3927.h>
|
||||
|
||||
/* A lot of complication here is taken away because:
|
||||
*
|
||||
* 1) We handle one interrupt and return, sitting in a loop
|
||||
* and moving across all the pending IRQ bits in the cause
|
||||
* register is _NOT_ the answer, the common case is one
|
||||
* pending IRQ so optimize in that direction.
|
||||
*
|
||||
* 2) We need not check against bits in the status register
|
||||
* IRQ mask, that would make this routine slow as hell.
|
||||
*
|
||||
* 3) Linux only thinks in terms of all IRQs on or all IRQs
|
||||
* off, nothing in between like BSD spl() brain-damage.
|
||||
*
|
||||
*/
|
||||
|
||||
/* Flush write buffer (needed?)
|
||||
* NOTE: TX39xx performs "non-blocking load", so explicitly use the target
|
||||
* register of LBU to flush immediately.
|
||||
*/
|
||||
#define FLUSH_WB(tmp) \
|
||||
la tmp, JMR3927_IOC_REV_ADDR; \
|
||||
lbu tmp, (tmp); \
|
||||
move tmp, zero;
|
||||
|
||||
.text
|
||||
.set noreorder
|
||||
.set noat
|
||||
.align 5
|
||||
NESTED(jmr3927_IRQ, PT_SIZE, sp)
|
||||
SAVE_ALL
|
||||
CLI
|
||||
.set at
|
||||
jal jmr3927_irc_irqdispatch
|
||||
move a0, sp
|
||||
FLUSH_WB(t0)
|
||||
j ret_from_irq
|
||||
nop
|
||||
END(jmr3927_IRQ)
|
|
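FLUSH_WB in the file removed above drains the TX39 write buffer by issuing a load and consuming the result, so the CPU has to wait for the bus. A hedged user-space imitation of that read-back idiom is below; the "register" is simulated, since on real hardware the pointer would have to name an uncached I/O address such as the JMR3927 IOC revision register.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for an uncached I/O register; real code would use a fixed KSEG1 address. */
static volatile uint8_t fake_ioc_rev;

static inline void flush_write_buffer(const volatile uint8_t *reg)
{
        uint8_t v = *reg;   /* volatile load: the CPU must wait for the bus */
        (void)v;            /* consume the value so the load is not dropped */
}

int main(void)
{
        flush_write_buffer(&fake_ioc_rev);
        puts("write buffer drained (simulated)");
        return 0;
}
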
@ -77,8 +77,6 @@ static int jmr3927_gen_iack(void)
|
|||
}
|
||||
#endif
|
||||
|
||||
extern asmlinkage void jmr3927_IRQ(void);
|
||||
|
||||
#define irc_dlevel 0
|
||||
#define irc_elevel 1
|
||||
|
||||
|
@ -262,7 +260,7 @@ void jmr3927_spurious(struct pt_regs *regs)
|
|||
regs->cp0_cause, regs->cp0_epc, regs->regs[31]);
|
||||
}
|
||||
|
||||
void jmr3927_irc_irqdispatch(struct pt_regs *regs)
|
||||
asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
|
||||
{
|
||||
int irq;
|
||||
|
||||
|
@ -398,8 +396,6 @@ void __init arch_init_irq(void)
|
|||
|
||||
jmr3927_irq_init(NR_ISA_IRQS);
|
||||
|
||||
set_except_vector(0, jmr3927_IRQ);
|
||||
|
||||
/* setup irq space */
|
||||
add_tb_irq_space(&jmr3927_isac_irqspace);
|
||||
add_tb_irq_space(&jmr3927_ioc_irqspace);
|
||||
|
|
|
@ -34,8 +34,11 @@ obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
|
|||
|
||||
obj-$(CONFIG_SMP) += smp.o
|
||||
|
||||
obj-$(CONFIG_MIPS_MT_SMP) += smp_mt.o
|
||||
obj-$(CONFIG_MIPS_MT) += mips-mt.o
|
||||
obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o
|
||||
obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
|
||||
|
||||
obj-$(CONFIG_MIPS_APSP_KSPD) += kspd.o
|
||||
obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
|
||||
obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
|
||||
|
||||
|
|
|
@ -69,6 +69,9 @@ void output_ptreg_defines(void)
|
|||
offset("#define PT_BVADDR ", struct pt_regs, cp0_badvaddr);
|
||||
offset("#define PT_STATUS ", struct pt_regs, cp0_status);
|
||||
offset("#define PT_CAUSE ", struct pt_regs, cp0_cause);
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
offset("#define PT_TCSTATUS ", struct pt_regs, cp0_tcstatus);
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
size("#define PT_SIZE ", struct pt_regs);
|
||||
linefeed;
|
||||
}
|
||||
|
|
|
@ -17,6 +17,9 @@
|
|||
#include <asm/isadep.h>
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/war.h>
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
#include <asm/mipsmtregs.h>
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PREEMPT
|
||||
.macro preempt_stop
|
||||
|
@ -75,6 +78,37 @@ FEXPORT(syscall_exit)
|
|||
bnez t0, syscall_exit_work
|
||||
|
||||
FEXPORT(restore_all) # restore full frame
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
/* Detect and execute deferred IPI "interrupts" */
|
||||
move a0,sp
|
||||
jal deferred_smtc_ipi
|
||||
/* Re-arm any temporarily masked interrupts not explicitly "acked" */
|
||||
mfc0 v0, CP0_TCSTATUS
|
||||
ori v1, v0, TCSTATUS_IXMT
|
||||
mtc0 v1, CP0_TCSTATUS
|
||||
andi v0, TCSTATUS_IXMT
|
||||
ehb
|
||||
mfc0 t0, CP0_TCCONTEXT
|
||||
DMT 9 # dmt t1
|
||||
jal mips_ihb
|
||||
mfc0 t2, CP0_STATUS
|
||||
andi t3, t0, 0xff00
|
||||
or t2, t2, t3
|
||||
mtc0 t2, CP0_STATUS
|
||||
ehb
|
||||
andi t1, t1, VPECONTROL_TE
|
||||
beqz t1, 1f
|
||||
EMT
|
||||
1:
|
||||
mfc0 v1, CP0_TCSTATUS
|
||||
/* We set IXMT above, XOR should cler it here */
|
||||
xori v1, v1, TCSTATUS_IXMT
|
||||
or v1, v0, v1
|
||||
mtc0 v1, CP0_TCSTATUS
|
||||
ehb
|
||||
xor t0, t0, t3
|
||||
mtc0 t0, CP0_TCCONTEXT
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
.set noat
|
||||
RESTORE_TEMP
|
||||
RESTORE_AT
|
||||
|
@ -120,28 +154,17 @@ syscall_exit_work:
|
|||
jal do_syscall_trace
|
||||
b resume_userspace
|
||||
|
||||
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT)
|
||||
|
||||
/*
|
||||
* Common spurious interrupt handler.
|
||||
* MIPS32R2 Instruction Hazard Barrier - must be called
|
||||
*
|
||||
* For C code use the inline version named instruction_hazard().
|
||||
*/
|
||||
LEAF(spurious_interrupt)
|
||||
/*
|
||||
* Someone tried to fool us by sending an interrupt but we
|
||||
* couldn't find a cause for it.
|
||||
*/
|
||||
PTR_LA t1, irq_err_count
|
||||
#ifdef CONFIG_SMP
|
||||
1: ll t0, (t1)
|
||||
addiu t0, 1
|
||||
sc t0, (t1)
|
||||
#if R10000_LLSC_WAR
|
||||
beqzl t0, 1b
|
||||
#else
|
||||
beqz t0, 1b
|
||||
#endif
|
||||
#else
|
||||
lw t0, (t1)
|
||||
addiu t0, 1
|
||||
sw t0, (t1)
|
||||
#endif
|
||||
j ret_from_irq
|
||||
END(spurious_interrupt)
|
||||
LEAF(mips_ihb)
|
||||
.set mips32r2
|
||||
jr.hb ra
|
||||
nop
|
||||
END(mips_ihb)
|
||||
|
||||
#endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */
|
||||
|
|
|
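The spurious_interrupt assembly removed above bumps irq_err_count with an ll/sc retry loop on SMP; the C replacement elsewhere in this series just calls atomic_inc(). The sketch below reproduces the retry structure with C11 atomics as a rough user-space analogue, not the kernel's atomic_t implementation.

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint irq_err_count;

/*
 * Mirror of the ll/sc pattern: read the old value, try to store old+1,
 * and retry if another CPU got in between (compare-exchange fails the
 * same way sc writes back 0 when the link is broken).
 */
static void count_spurious(void)
{
        unsigned int old = atomic_load(&irq_err_count);

        while (!atomic_compare_exchange_weak(&irq_err_count, &old, old + 1))
                ;       /* 'old' is reloaded on failure, just retry */
}

int main(void)
{
        for (int i = 0; i < 5; i++)
                count_spurious();
        printf("irq_err_count = %u\n", atomic_load(&irq_err_count));
        return 0;
}
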
@ -283,11 +283,33 @@
|
|||
*/
|
||||
|
||||
3:
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
/* Read-modify write of Status must be atomic */
|
||||
mfc0 t2, CP0_TCSTATUS
|
||||
ori t1, t2, TCSTATUS_IXMT
|
||||
mtc0 t1, CP0_TCSTATUS
|
||||
andi t2, t2, TCSTATUS_IXMT
|
||||
ehb
|
||||
DMT 9 # dmt t1
|
||||
jal mips_ihb
|
||||
nop
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
mfc0 t0, CP0_STATUS
|
||||
ori t0, 0x1f
|
||||
xori t0, 0x1f
|
||||
mtc0 t0, CP0_STATUS
|
||||
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
andi t1, t1, VPECONTROL_TE
|
||||
beqz t1, 9f
|
||||
nop
|
||||
EMT # emt
|
||||
9:
|
||||
mfc0 t1, CP0_TCSTATUS
|
||||
xori t1, t1, TCSTATUS_IXMT
|
||||
or t1, t1, t2
|
||||
mtc0 t1, CP0_TCSTATUS
|
||||
ehb
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
LONG_L v0, GDB_FR_STATUS(sp)
|
||||
LONG_L v1, GDB_FR_EPC(sp)
|
||||
mtc0 v0, CP0_STATUS
|
||||
|
|
|
@ -140,6 +140,7 @@
|
|||
#include <asm/system.h>
|
||||
#include <asm/gdb-stub.h>
|
||||
#include <asm/inst.h>
|
||||
#include <asm/smp.h>
|
||||
|
||||
/*
|
||||
* external low-level support routines
|
||||
|
@ -669,6 +670,64 @@ static void kgdb_wait(void *arg)
|
|||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* GDB stub needs to call kgdb_wait on all processors with interrupts
|
||||
* disabled, so it uses its own special variant.
|
||||
*/
|
||||
static int kgdb_smp_call_kgdb_wait(void)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
struct call_data_struct data;
|
||||
int i, cpus = num_online_cpus() - 1;
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
/*
|
||||
* Can die spectacularly if this CPU isn't yet marked online
|
||||
*/
|
||||
BUG_ON(!cpu_online(cpu));
|
||||
|
||||
if (!cpus)
|
||||
return 0;
|
||||
|
||||
if (spin_is_locked(&smp_call_lock)) {
|
||||
/*
|
||||
* Some other processor is trying to make us do something
|
||||
* but we're not going to respond... give up
|
||||
*/
|
||||
return -1;
|
||||
}
|
||||
|
||||
/*
|
||||
* We will continue here, accepting the fact that
|
||||
* the kernel may deadlock if another CPU attempts
|
||||
* to call smp_call_function now...
|
||||
*/
|
||||
|
||||
data.func = kgdb_wait;
|
||||
data.info = NULL;
|
||||
atomic_set(&data.started, 0);
|
||||
data.wait = 0;
|
||||
|
||||
spin_lock(&smp_call_lock);
|
||||
call_data = &data;
|
||||
mb();
|
||||
|
||||
/* Send a message to all other CPUs and wait for them to respond */
|
||||
for (i = 0; i < NR_CPUS; i++)
|
||||
if (cpu_online(i) && i != cpu)
|
||||
core_send_ipi(i, SMP_CALL_FUNCTION);
|
||||
|
||||
/* Wait for response */
|
||||
/* FIXME: lock-up detection, backtrace on lock-up */
|
||||
while (atomic_read(&data.started) != cpus)
|
||||
barrier();
|
||||
|
||||
call_data = NULL;
|
||||
spin_unlock(&smp_call_lock);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* This function does all command processing for interfacing to gdb. It
|
||||
|
@ -718,7 +777,7 @@ void handle_exception (struct gdb_regs *regs)
|
|||
/*
|
||||
* force other cpus to enter kgdb
|
||||
*/
|
||||
smp_call_function(kgdb_wait, NULL, 0, 0);
|
||||
kgdb_smp_call_kgdb_wait();
|
||||
|
||||
/*
|
||||
* If we're in breakpoint() increment the PC
|
||||
|
|
|
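kgdb_smp_call_kgdb_wait() above raises an IPI on every other CPU and then spins until each one has bumped data.started. The rendezvous itself is only an atomic counter plus a busy-wait; here is a pthreads toy version of that handshake, with all names invented for the sketch.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

static atomic_int started;

/* Each "CPU" checks in once it has parked itself, like kgdb_wait(). */
static void *cpu_wait(void *arg)
{
        (void)arg;
        atomic_fetch_add(&started, 1);
        return NULL;
}

int main(void)
{
        pthread_t t[NCPUS - 1];

        /* "Send the IPI" to every other CPU. */
        for (int i = 0; i < NCPUS - 1; i++)
                pthread_create(&t[i], NULL, cpu_wait, NULL);

        /* Spin until every other CPU has responded. */
        while (atomic_load(&started) != NCPUS - 1)
                ;       /* as in the original: no lock-up detection yet */

        for (int i = 0; i < NCPUS - 1; i++)
                pthread_join(t[i], NULL);

        printf("all %d other CPUs checked in\n", NCPUS - 1);
        return 0;
}
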
@ -12,6 +12,7 @@
|
|||
#include <linux/init.h>
|
||||
|
||||
#include <asm/asm.h>
|
||||
#include <asm/asmmacro.h>
|
||||
#include <asm/cacheops.h>
|
||||
#include <asm/regdef.h>
|
||||
#include <asm/fpregdef.h>
|
||||
|
@ -122,6 +123,20 @@ handle_vcei:
|
|||
.set pop
|
||||
END(except_vec3_r4000)
|
||||
|
||||
__FINIT
|
||||
|
||||
.align 5
|
||||
NESTED(handle_int, PT_SIZE, sp)
|
||||
SAVE_ALL
|
||||
CLI
|
||||
|
||||
PTR_LA ra, ret_from_irq
|
||||
move a0, sp
|
||||
j plat_irq_dispatch
|
||||
END(handle_int)
|
||||
|
||||
__INIT
|
||||
|
||||
/*
|
||||
* Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
|
||||
* This is a dedicated interrupt exception vector which reduces the
|
||||
|
@ -157,6 +172,15 @@ NESTED(except_vec_vi, 0, sp)
|
|||
SAVE_AT
|
||||
.set push
|
||||
.set noreorder
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
/*
|
||||
* To keep from blindly blocking *all* interrupts
|
||||
* during service by SMTC kernel, we also want to
|
||||
* pass the IM value to be cleared.
|
||||
*/
|
||||
EXPORT(except_vec_vi_mori)
|
||||
ori a0, $0, 0
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
EXPORT(except_vec_vi_lui)
|
||||
lui v0, 0 /* Patched */
|
||||
j except_vec_vi_handler
|
||||
|
@ -173,6 +197,25 @@ EXPORT(except_vec_vi_end)
|
|||
NESTED(except_vec_vi_handler, 0, sp)
|
||||
SAVE_TEMP
|
||||
SAVE_STATIC
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
/*
|
||||
* SMTC has an interesting problem that interrupts are level-triggered,
|
||||
* and the CLI macro will clear EXL, potentially causing a duplicate
|
||||
* interrupt service invocation. So we need to clear the associated
|
||||
* IM bit of Status prior to doing CLI, and restore it after the
|
||||
* service routine has been invoked - we must assume that the
|
||||
* service routine will have cleared the state, and any active
|
||||
* level represents a new or otherwise unserviced event...
|
||||
*/
|
||||
mfc0 t1, CP0_STATUS
|
||||
and t0, a0, t1
|
||||
mfc0 t2, CP0_TCCONTEXT
|
||||
or t0, t0, t2
|
||||
mtc0 t0, CP0_TCCONTEXT
|
||||
xor t1, t1, t0
|
||||
mtc0 t1, CP0_STATUS
|
||||
ehb
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
CLI
|
||||
move a0, sp
|
||||
jalr v0
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include <linux/threads.h>
|
||||
|
||||
#include <asm/asm.h>
|
||||
#include <asm/asmmacro.h>
|
||||
#include <asm/regdef.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/mipsregs.h>
|
||||
|
@ -82,12 +83,33 @@
|
|||
*/
|
||||
.macro setup_c0_status set clr
|
||||
.set push
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
/*
|
||||
* For SMTC, we need to set privilege and disable interrupts only for
|
||||
* the current TC, using the TCStatus register.
|
||||
*/
|
||||
mfc0 t0, CP0_TCSTATUS
|
||||
/* Fortunately CU 0 is in the same place in both registers */
|
||||
/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
|
||||
li t1, ST0_CU0 | 0x08001c00
|
||||
or t0, t1
|
||||
/* Clear TKSU, leave IXMT */
|
||||
xori t0, 0x00001800
|
||||
mtc0 t0, CP0_TCSTATUS
|
||||
ehb
|
||||
/* We need to leave the global IE bit set, but clear EXL...*/
|
||||
mfc0 t0, CP0_STATUS
|
||||
or t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr
|
||||
xor t0, ST0_EXL | ST0_ERL | \clr
|
||||
mtc0 t0, CP0_STATUS
|
||||
#else
|
||||
mfc0 t0, CP0_STATUS
|
||||
or t0, ST0_CU0|\set|0x1f|\clr
|
||||
xor t0, 0x1f|\clr
|
||||
mtc0 t0, CP0_STATUS
|
||||
.set noreorder
|
||||
sll zero,3 # ehb
|
||||
#endif
|
||||
.set pop
|
||||
.endm
|
||||
|
||||
|
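setup_c0_status above relies on the usual MIPS or-then-xor idiom: OR in both the bits to be set and the bits to be cleared, then XOR the to-be-cleared bits back out, giving a set-and-clear update in two instructions with no inverted mask. The same arithmetic in plain C, with example bit values that are only stand-ins for the real Status fields:

#include <stdio.h>

/*
 * OR forces every bit in (set | clear) to 1, then XOR with 'clear'
 * flips exactly those bits back to 0 - so 'set' bits end up 1 and
 * 'clear' bits end up 0, whatever their previous state was.
 */
static unsigned int or_xor_update(unsigned int reg,
                                  unsigned int set, unsigned int clear)
{
        reg |= set | clear;
        reg ^= clear;
        return reg;
}

int main(void)
{
        unsigned int status = 0x00400004;       /* arbitrary starting value */

        status = or_xor_update(status,
                               0x10000000,      /* bit to set (CU0-like)        */
                               0x0000001f);     /* bits to clear (mode/IE-like) */
        printf("status = 0x%08x\n", status);    /* -> 0x10400000 */
        return 0;
}
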
@ -134,6 +156,24 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
|
|||
|
||||
ARC64_TWIDDLE_PC
|
||||
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
/*
|
||||
* In SMTC kernel, "CLI" is thread-specific, in TCStatus.
|
||||
* We still need to enable interrupts globally in Status,
|
||||
* and clear EXL/ERL.
|
||||
*
|
||||
* TCContext is used to track interrupt levels under
|
||||
* service in SMTC kernel. Clear for boot TC before
|
||||
* allowing any interrupts.
|
||||
*/
|
||||
mtc0 zero, CP0_TCCONTEXT
|
||||
|
||||
mfc0 t0, CP0_STATUS
|
||||
ori t0, t0, 0xff1f
|
||||
xori t0, t0, 0x001e
|
||||
mtc0 t0, CP0_STATUS
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
|
||||
PTR_LA t0, __bss_start # clear .bss
|
||||
LONG_S zero, (t0)
|
||||
PTR_LA t1, __bss_stop - LONGSIZE
|
||||
|
@ -166,8 +206,25 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
|
|||
* function after setting up the stack and gp registers.
|
||||
*/
|
||||
NESTED(smp_bootstrap, 16, sp)
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
/*
|
||||
* Read-modify-writes of Status must be atomic, and this
|
||||
* is one case where CLI is invoked without EXL being
|
||||
* necessarily set. The CLI and setup_c0_status will
|
||||
* in fact be redundant for all but the first TC of
|
||||
* each VPE being booted.
|
||||
*/
|
||||
DMT 10 # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */
|
||||
jal mips_ihb
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
setup_c0_status_sec
|
||||
smp_slave_setup
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
andi t2, t2, VPECONTROL_TE
|
||||
beqz t2, 2f
|
||||
EMT # emt
|
||||
2:
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
j start_secondary
|
||||
END(smp_bootstrap)
|
||||
#endif /* CONFIG_SMP */
|
||||
|
|
|
@ -187,6 +187,10 @@ handle_real_irq:
|
|||
outb(cached_21,0x21);
|
||||
outb(0x60+irq,0x20); /* 'Specific EOI' to master */
|
||||
}
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
if (irq_hwmask[irq] & ST0_IM)
|
||||
set_c0_status(irq_hwmask[irq] & ST0_IM);
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
spin_unlock_irqrestore(&i8259A_lock, flags);
|
||||
return;
|
||||
|
||||
|
|
|
@ -76,6 +76,11 @@ static void level_mask_and_ack_msc_irq(unsigned int irq)
|
|||
mask_msc_irq(irq);
|
||||
if (!cpu_has_veic)
|
||||
MSCIC_WRITE(MSC01_IC_EOI, 0);
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
/* This actually needs to be a call into platform code */
|
||||
if (irq_hwmask[irq] & ST0_IM)
|
||||
set_c0_status(irq_hwmask[irq] & ST0_IM);
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -92,6 +97,10 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq)
|
|||
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
|
||||
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
|
||||
}
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
if (irq_hwmask[irq] & ST0_IM)
|
||||
set_c0_status(irq_hwmask[irq] & ST0_IM);
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -38,6 +38,15 @@ void ack_bad_irq(unsigned int irq)
|
|||
|
||||
atomic_t irq_err_count;
|
||||
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
/*
|
||||
* SMTC Kernel needs to manipulate low-level CPU interrupt masks
|
||||
* in do_IRQ. These are passed in setup_irq_smtc() and stored
|
||||
* in this table.
|
||||
*/
|
||||
unsigned long irq_hwmask[NR_IRQS];
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
|
||||
#undef do_IRQ
|
||||
|
||||
/*
|
||||
|
@ -49,6 +58,7 @@ asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs)
|
|||
{
|
||||
irq_enter();
|
||||
|
||||
__DO_IRQ_SMTC_HOOK();
|
||||
__do_IRQ(irq, regs);
|
||||
|
||||
irq_exit();
|
||||
|
@ -101,6 +111,11 @@ skip:
|
|||
return 0;
|
||||
}
|
||||
|
||||
asmlinkage void spurious_interrupt(struct pt_regs *regs)
|
||||
{
|
||||
atomic_inc(&irq_err_count);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_KGDB
|
||||
extern void breakpoint(void);
|
||||
extern void set_debug_traps(void);
|
||||
|
@ -124,6 +139,9 @@ void __init init_IRQ(void)
|
|||
irq_desc[i].depth = 1;
|
||||
irq_desc[i].handler = &no_irq_type;
|
||||
spin_lock_init(&irq_desc[i].lock);
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
irq_hwmask[i] = 0;
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
}
|
||||
|
||||
arch_init_irq();
|
||||
|
|
|
@ -0,0 +1,398 @@
|
|||
/*
|
||||
* Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can distribute it and/or modify it
|
||||
* under the terms of the GNU General Public License (Version 2) as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along
|
||||
* with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
|
||||
*
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/unistd.h>
|
||||
#include <linux/file.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/list.h>
|
||||
|
||||
#include <asm/vpe.h>
|
||||
#include <asm/rtlx.h>
|
||||
#include <asm/kspd.h>
|
||||
|
||||
static struct workqueue_struct *workqueue = NULL;
|
||||
static struct work_struct work;
|
||||
|
||||
extern unsigned long cpu_khz;
|
||||
|
||||
struct mtsp_syscall {
|
||||
int cmd;
|
||||
unsigned char abi;
|
||||
unsigned char size;
|
||||
};
|
||||
|
||||
struct mtsp_syscall_ret {
|
||||
int retval;
|
||||
int errno;
|
||||
};
|
||||
|
||||
struct mtsp_syscall_generic {
|
||||
int arg0;
|
||||
int arg1;
|
||||
int arg2;
|
||||
int arg3;
|
||||
int arg4;
|
||||
int arg5;
|
||||
int arg6;
|
||||
};
|
||||
|
||||
static struct list_head kspd_notifylist;
|
||||
static int sp_stopping = 0;
|
||||
|
||||
/* these should match with those in the SDE kit */
|
||||
#define MTSP_SYSCALL_BASE 0
|
||||
#define MTSP_SYSCALL_EXIT (MTSP_SYSCALL_BASE + 0)
|
||||
#define MTSP_SYSCALL_OPEN (MTSP_SYSCALL_BASE + 1)
|
||||
#define MTSP_SYSCALL_READ (MTSP_SYSCALL_BASE + 2)
|
||||
#define MTSP_SYSCALL_WRITE (MTSP_SYSCALL_BASE + 3)
|
||||
#define MTSP_SYSCALL_CLOSE (MTSP_SYSCALL_BASE + 4)
|
||||
#define MTSP_SYSCALL_LSEEK32 (MTSP_SYSCALL_BASE + 5)
|
||||
#define MTSP_SYSCALL_ISATTY (MTSP_SYSCALL_BASE + 6)
|
||||
#define MTSP_SYSCALL_GETTIME (MTSP_SYSCALL_BASE + 7)
|
||||
#define MTSP_SYSCALL_PIPEFREQ (MTSP_SYSCALL_BASE + 8)
|
||||
#define MTSP_SYSCALL_GETTOD (MTSP_SYSCALL_BASE + 9)
|
||||
|
||||
#define MTSP_O_RDONLY 0x0000
|
||||
#define MTSP_O_WRONLY 0x0001
|
||||
#define MTSP_O_RDWR 0x0002
|
||||
#define MTSP_O_NONBLOCK 0x0004
|
||||
#define MTSP_O_APPEND 0x0008
|
||||
#define MTSP_O_SHLOCK 0x0010
|
||||
#define MTSP_O_EXLOCK 0x0020
|
||||
#define MTSP_O_ASYNC 0x0040
|
||||
#define MTSP_O_FSYNC O_SYNC
|
||||
#define MTSP_O_NOFOLLOW 0x0100
|
||||
#define MTSP_O_SYNC 0x0080
|
||||
#define MTSP_O_CREAT 0x0200
|
||||
#define MTSP_O_TRUNC 0x0400
|
||||
#define MTSP_O_EXCL 0x0800
|
||||
#define MTSP_O_BINARY 0x8000
|
||||
|
||||
#define SP_VPE 1
|
||||
|
||||
struct apsp_table {
|
||||
int sp;
|
||||
int ap;
|
||||
};
|
||||
|
||||
/* we might want to do the mode flags too */
|
||||
struct apsp_table open_flags_table[] = {
|
||||
{ MTSP_O_RDWR, O_RDWR },
|
||||
{ MTSP_O_WRONLY, O_WRONLY },
|
||||
{ MTSP_O_CREAT, O_CREAT },
|
||||
{ MTSP_O_TRUNC, O_TRUNC },
|
||||
{ MTSP_O_NONBLOCK, O_NONBLOCK },
|
||||
{ MTSP_O_APPEND, O_APPEND },
|
||||
{ MTSP_O_NOFOLLOW, O_NOFOLLOW }
|
||||
};
|
||||
|
||||
struct apsp_table syscall_command_table[] = {
|
||||
{ MTSP_SYSCALL_OPEN, __NR_open },
|
||||
{ MTSP_SYSCALL_CLOSE, __NR_close },
|
||||
{ MTSP_SYSCALL_READ, __NR_read },
|
||||
{ MTSP_SYSCALL_WRITE, __NR_write },
|
||||
{ MTSP_SYSCALL_LSEEK32, __NR_lseek }
|
||||
};
|
||||
|
||||
static int sp_syscall(int num, int arg0, int arg1, int arg2, int arg3)
|
||||
{
|
||||
register long int _num __asm__ ("$2") = num;
|
||||
register long int _arg0 __asm__ ("$4") = arg0;
|
||||
register long int _arg1 __asm__ ("$5") = arg1;
|
||||
register long int _arg2 __asm__ ("$6") = arg2;
|
||||
register long int _arg3 __asm__ ("$7") = arg3;
|
||||
|
||||
mm_segment_t old_fs;
|
||||
|
||||
old_fs = get_fs();
|
||||
set_fs(KERNEL_DS);
|
||||
|
||||
__asm__ __volatile__ (
|
||||
" syscall \n"
|
||||
: "=r" (_num), "=r" (_arg3)
|
||||
: "r" (_num), "r" (_arg0), "r" (_arg1), "r" (_arg2), "r" (_arg3));
|
||||
|
||||
set_fs(old_fs);
|
||||
|
||||
/* $a3 is error flag */
|
||||
if (_arg3)
|
||||
return -_num;
|
||||
|
||||
return _num;
|
||||
}
|
||||
|
||||
static int translate_syscall_command(int cmd)
|
||||
{
|
||||
int i;
|
||||
int ret = -1;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(syscall_command_table); i++) {
|
||||
if ((cmd == syscall_command_table[i].sp))
|
||||
return syscall_command_table[i].ap;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static unsigned int translate_open_flags(int flags)
|
||||
{
|
||||
int i;
|
||||
unsigned int ret = 0;
|
||||
|
||||
for (i = 0; i < (sizeof(open_flags_table) / sizeof(struct apsp_table));
|
||||
i++) {
|
||||
if( (flags & open_flags_table[i].sp) ) {
|
||||
ret |= open_flags_table[i].ap;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
static void sp_setfsuidgid( uid_t uid, gid_t gid)
|
||||
{
|
||||
current->fsuid = uid;
|
||||
current->fsgid = gid;
|
||||
|
||||
key_fsuid_changed(current);
|
||||
key_fsgid_changed(current);
|
||||
}
|
||||
|
||||
/*
|
||||
* Expects a request to be on the sysio channel. Reads it. Decides whether
|
||||
* it's a Linux syscall and runs it, or whatever. Puts the return code back
|
||||
* into the request and sends the whole thing back.
|
||||
*/
|
||||
void sp_work_handle_request(void)
|
||||
{
|
||||
struct mtsp_syscall sc;
|
||||
struct mtsp_syscall_generic generic;
|
||||
struct mtsp_syscall_ret ret;
|
||||
struct kspd_notifications *n;
|
||||
struct timeval tv;
|
||||
struct timezone tz;
|
||||
int cmd;
|
||||
|
||||
char *vcwd;
|
||||
mm_segment_t old_fs;
|
||||
int size;
|
||||
|
||||
ret.retval = -1;
|
||||
|
||||
if (!rtlx_read(RTLX_CHANNEL_SYSIO, &sc, sizeof(struct mtsp_syscall), 0)) {
|
||||
printk(KERN_ERR "Expected request but nothing to read\n");
|
||||
return;
|
||||
}
|
||||
|
||||
size = sc.size;
|
||||
|
||||
if (size) {
|
||||
if (!rtlx_read(RTLX_CHANNEL_SYSIO, &generic, size, 0)) {
|
||||
printk(KERN_ERR "Expected request but nothing to read\n");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/* Run the syscall at the privilege of the user who loaded the
|
||||
SP program */
|
||||
|
||||
if (vpe_getuid(SP_VPE))
|
||||
sp_setfsuidgid( vpe_getuid(SP_VPE), vpe_getgid(SP_VPE));
|
||||
|
||||
switch (sc.cmd) {
|
||||
/* needs the flags argument translating from SDE kit to
|
||||
linux */
|
||||
case MTSP_SYSCALL_PIPEFREQ:
|
||||
ret.retval = cpu_khz * 1000;
|
||||
ret.errno = 0;
|
||||
break;
|
||||
|
||||
case MTSP_SYSCALL_GETTOD:
|
||||
memset(&tz, 0, sizeof(tz));
|
||||
if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
|
||||
(int)&tz, 0,0)) == 0)
|
||||
ret.retval = tv.tv_sec;
|
||||
|
||||
ret.errno = errno;
|
||||
break;
|
||||
|
||||
case MTSP_SYSCALL_EXIT:
|
||||
list_for_each_entry(n, &kspd_notifylist, list)
|
||||
n->kspd_sp_exit(SP_VPE);
|
||||
sp_stopping = 1;
|
||||
|
||||
printk(KERN_DEBUG "KSPD got exit syscall from SP exitcode %d\n",
|
||||
generic.arg0);
|
||||
break;
|
||||
|
||||
case MTSP_SYSCALL_OPEN:
|
||||
generic.arg1 = translate_open_flags(generic.arg1);
|
||||
|
||||
vcwd = vpe_getcwd(SP_VPE);
|
||||
|
||||
/* change to the cwd of the process that loaded the SP program */
|
||||
old_fs = get_fs();
|
||||
set_fs(KERNEL_DS);
|
||||
sys_chdir(vcwd);
|
||||
set_fs(old_fs);
|
||||
|
||||
sc.cmd = __NR_open;
|
||||
|
||||
/* fall through */
|
||||
|
||||
default:
|
||||
if ((sc.cmd >= __NR_Linux) &&
|
||||
(sc.cmd <= (__NR_Linux + __NR_Linux_syscalls)) )
|
||||
cmd = sc.cmd;
|
||||
else
|
||||
cmd = translate_syscall_command(sc.cmd);
|
||||
|
||||
if (cmd >= 0) {
|
||||
ret.retval = sp_syscall(cmd, generic.arg0, generic.arg1,
|
||||
generic.arg2, generic.arg3);
|
||||
ret.errno = errno;
|
||||
} else
|
||||
printk(KERN_WARNING
|
||||
"KSPD: Unknown SP syscall number %d\n", sc.cmd);
|
||||
break;
|
||||
} /* switch */
|
||||
|
||||
if (vpe_getuid(SP_VPE))
|
||||
sp_setfsuidgid( 0, 0);
|
||||
|
||||
if ((rtlx_write(RTLX_CHANNEL_SYSIO, &ret, sizeof(struct mtsp_syscall_ret), 0))
|
||||
< sizeof(struct mtsp_syscall_ret))
|
||||
printk("KSPD: sp_work_handle_request failed to send to SP\n");
|
||||
}
|
||||
|
||||
static void sp_cleanup(void)
|
||||
{
|
||||
struct files_struct *files = current->files;
|
||||
int i, j;
|
||||
struct fdtable *fdt;
|
||||
|
||||
j = 0;
|
||||
|
||||
/*
|
||||
* It is safe to dereference the fd table without RCU or
|
||||
* ->file_lock
|
||||
*/
|
||||
fdt = files_fdtable(files);
|
||||
for (;;) {
|
||||
unsigned long set;
|
||||
i = j * __NFDBITS;
|
||||
if (i >= fdt->max_fdset || i >= fdt->max_fds)
|
||||
break;
|
||||
set = fdt->open_fds->fds_bits[j++];
|
||||
while (set) {
|
||||
if (set & 1) {
|
||||
struct file * file = xchg(&fdt->fd[i], NULL);
|
||||
if (file)
|
||||
filp_close(file, files);
|
||||
}
|
||||
i++;
|
||||
set >>= 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int channel_open = 0;
|
||||
|
||||
/* the work handler */
|
||||
static void sp_work(void *data)
|
||||
{
|
||||
if (!channel_open) {
|
||||
if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) {
|
||||
printk("KSPD: unable to open sp channel\n");
|
||||
sp_stopping = 1;
|
||||
} else {
|
||||
channel_open++;
|
||||
printk(KERN_DEBUG "KSPD: SP channel opened\n");
|
||||
}
|
||||
} else {
|
||||
/* wait for some data, allow it to sleep */
|
||||
rtlx_read_poll(RTLX_CHANNEL_SYSIO, 1);
|
||||
|
||||
/* Check we haven't been woken because we are stopping */
|
||||
if (!sp_stopping)
|
||||
sp_work_handle_request();
|
||||
}
|
||||
|
||||
if (!sp_stopping)
|
||||
queue_work(workqueue, &work);
|
||||
else
|
||||
sp_cleanup();
|
||||
}
|
||||
|
||||
static void startwork(int vpe)
|
||||
{
|
||||
sp_stopping = channel_open = 0;
|
||||
|
||||
if (workqueue == NULL) {
|
||||
if ((workqueue = create_singlethread_workqueue("kspd")) == NULL) {
|
||||
printk(KERN_ERR "unable to start kspd\n");
|
||||
return;
|
||||
}
|
||||
|
||||
INIT_WORK(&work, sp_work, NULL);
|
||||
queue_work(workqueue, &work);
|
||||
} else
|
||||
queue_work(workqueue, &work);
|
||||
|
||||
}
|
||||
|
||||
static void stopwork(int vpe)
|
||||
{
|
||||
sp_stopping = 1;
|
||||
|
||||
printk(KERN_DEBUG "KSPD: SP stopping\n");
|
||||
}
|
||||
|
||||
void kspd_notify(struct kspd_notifications *notify)
|
||||
{
|
||||
list_add(&notify->list, &kspd_notifylist);
|
||||
}
|
||||
|
||||
static struct vpe_notifications notify;
|
||||
static int kspd_module_init(void)
|
||||
{
|
||||
INIT_LIST_HEAD(&kspd_notifylist);
|
||||
|
||||
notify.start = startwork;
|
||||
notify.stop = stopwork;
|
||||
vpe_notify(SP_VPE, &notify);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void kspd_module_exit(void)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
module_init(kspd_module_init);
|
||||
module_exit(kspd_module_exit);
|
||||
|
||||
MODULE_DESCRIPTION("MIPS KSPD");
|
||||
MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
|
||||
MODULE_LICENSE("GPL");
|
|
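kspd translates SDE-kit open flags and syscall numbers into their Linux equivalents by walking small {sp, ap} tables, as in translate_open_flags() above. A self-contained miniature of that table-driven translation, using made-up flag values on both sides:

#include <stdio.h>

struct apsp_table {
        int sp;         /* value as the SP-side program encodes it */
        int ap;         /* corresponding value on the kernel side  */
};

/* Made-up SP-side flag encodings; the kernel-side values are arbitrary too. */
#define SP_O_RDWR   0x0002
#define SP_O_CREAT  0x0200
#define AP_O_RDWR   0x0002
#define AP_O_CREAT  0x0040

static const struct apsp_table open_flags_table[] = {
        { SP_O_RDWR,  AP_O_RDWR  },
        { SP_O_CREAT, AP_O_CREAT },
};

static unsigned int translate_open_flags(int flags)
{
        unsigned int ret = 0;

        for (unsigned int i = 0;
             i < sizeof(open_flags_table) / sizeof(open_flags_table[0]); i++)
                if (flags & open_flags_table[i].sp)
                        ret |= open_flags_table[i].ap;

        return ret;
}

int main(void)
{
        printf("0x%x\n", translate_open_flags(SP_O_RDWR | SP_O_CREAT)); /* 0x42 */
        return 0;
}
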
@ -1182,6 +1182,16 @@ asmlinkage ssize_t sys32_readahead(int fd, u32 pad0, u64 a2, u64 a3,
|
|||
return sys_readahead(fd, merge_64(a2, a3), count);
|
||||
}
|
||||
|
||||
asmlinkage long sys32_sync_file_range(int fd, int __pad,
|
||||
unsigned long a2, unsigned long a3,
|
||||
unsigned long a4, unsigned long a5,
|
||||
int flags)
|
||||
{
|
||||
return sys_sync_file_range(fd,
|
||||
merge_64(a2, a3), merge_64(a4, a5),
|
||||
flags);
|
||||
}
|
||||
|
||||
/* Argument list sizes for sys_socketcall */
|
||||
#define AL(x) ((x) * sizeof(unsigned int))
|
||||
static unsigned char socketcall_nargs[18]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
|
||||
|
|
|
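sys32_sync_file_range() above rebuilds 64-bit offsets from pairs of 32-bit argument registers with merge_64(), since an o32 caller cannot pass a 64-bit value in one register. The arithmetic is a shift and an OR; which half arrives first depends on the ABI and endianness, so the sketch below simply assumes the high word comes first.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Assumed order: first argument is the high word, second the low word. */
static uint64_t merge64(uint32_t hi, uint32_t lo)
{
        return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
        uint64_t off = merge64(0x00000001u, 0x80000000u);

        printf("offset = 0x%016" PRIx64 "\n", off);   /* 0x0000000180000000 */
        return 0;
}
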
@ -0,0 +1,449 @@
|
|||
/*
|
||||
* General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
|
||||
* Copyright (C) 2005 Mips Technologies, Inc
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/interrupt.h>
|
||||
|
||||
#include <asm/cpu.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/atomic.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/hardirq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/smp.h>
|
||||
#include <asm/mipsmtregs.h>
|
||||
#include <asm/r4kcache.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
/*
|
||||
* CPU mask used to set process affinity for MT VPEs/TCs with FPUs
|
||||
*/
|
||||
|
||||
cpumask_t mt_fpu_cpumask;
|
||||
|
||||
#ifdef CONFIG_MIPS_MT_FPAFF
|
||||
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/delay.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
unsigned long mt_fpemul_threshold = 0;
|
||||
|
||||
/*
|
||||
* Replacement functions for the sys_sched_setaffinity() and
|
||||
* sys_sched_getaffinity() system calls, so that we can integrate
|
||||
* FPU affinity with the user's requested processor affinity.
|
||||
* This code is 98% identical with the sys_sched_setaffinity()
|
||||
* and sys_sched_getaffinity() system calls, and should be
|
||||
* updated when kernel/sched.c changes.
|
||||
*/
|
||||
|
||||
/*
|
||||
* find_process_by_pid - find a process with a matching PID value.
|
||||
* used in sys_sched_set/getaffinity() in kernel/sched.c, so
|
||||
* cloned here.
|
||||
*/
|
||||
static inline task_t *find_process_by_pid(pid_t pid)
|
||||
{
|
||||
return pid ? find_task_by_pid(pid) : current;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
|
||||
*/
|
||||
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
|
||||
unsigned long __user *user_mask_ptr)
|
||||
{
|
||||
cpumask_t new_mask;
|
||||
cpumask_t effective_mask;
|
||||
int retval;
|
||||
task_t *p;
|
||||
|
||||
if (len < sizeof(new_mask))
|
||||
return -EINVAL;
|
||||
|
||||
if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
|
||||
return -EFAULT;
|
||||
|
||||
lock_cpu_hotplug();
|
||||
read_lock(&tasklist_lock);
|
||||
|
||||
p = find_process_by_pid(pid);
|
||||
if (!p) {
|
||||
read_unlock(&tasklist_lock);
|
||||
unlock_cpu_hotplug();
|
||||
return -ESRCH;
|
||||
}
|
||||
|
||||
/*
|
||||
* It is not safe to call set_cpus_allowed with the
|
||||
* tasklist_lock held. We will bump the task_struct's
|
||||
* usage count and drop tasklist_lock before invoking
|
||||
* set_cpus_allowed.
|
||||
*/
|
||||
get_task_struct(p);
|
||||
|
||||
retval = -EPERM;
|
||||
if ((current->euid != p->euid) && (current->euid != p->uid) &&
|
||||
!capable(CAP_SYS_NICE)) {
|
||||
read_unlock(&tasklist_lock);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/* Record new user-specified CPU set for future reference */
|
||||
p->thread.user_cpus_allowed = new_mask;
|
||||
|
||||
/* Unlock the task list */
|
||||
read_unlock(&tasklist_lock);
|
||||
|
||||
/* Compute new global allowed CPU set if necessary */
|
||||
if( (p->thread.mflags & MF_FPUBOUND)
|
||||
&& cpus_intersects(new_mask, mt_fpu_cpumask)) {
|
||||
cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
|
||||
retval = set_cpus_allowed(p, effective_mask);
|
||||
} else {
|
||||
p->thread.mflags &= ~MF_FPUBOUND;
|
||||
retval = set_cpus_allowed(p, new_mask);
|
||||
}
|
||||
|
||||
|
||||
out_unlock:
|
||||
put_task_struct(p);
|
||||
unlock_cpu_hotplug();
|
||||
return retval;
|
||||
}
|
||||
|
||||
/*
|
||||
* mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
|
||||
*/
|
||||
asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
|
||||
unsigned long __user *user_mask_ptr)
|
||||
{
|
||||
unsigned int real_len;
|
||||
cpumask_t mask;
|
||||
int retval;
|
||||
task_t *p;
|
||||
|
||||
real_len = sizeof(mask);
|
||||
if (len < real_len)
|
||||
return -EINVAL;
|
||||
|
||||
lock_cpu_hotplug();
|
||||
read_lock(&tasklist_lock);
|
||||
|
||||
retval = -ESRCH;
|
||||
p = find_process_by_pid(pid);
|
||||
if (!p)
|
||||
goto out_unlock;
|
||||
|
||||
retval = 0;
|
||||
|
||||
cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
|
||||
|
||||
out_unlock:
|
||||
read_unlock(&tasklist_lock);
|
||||
unlock_cpu_hotplug();
|
||||
if (retval)
|
||||
return retval;
|
||||
if (copy_to_user(user_mask_ptr, &mask, real_len))
|
||||
return -EFAULT;
|
||||
return real_len;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_MIPS_MT_FPAFF */
|
||||
|
||||
/*
 * Dump new MIPS MT state for the core. Does not leave TCs halted.
 * Takes an argument which is taken to be a pre-call MVPControl value.
 */

void mips_mt_regdump(unsigned long mvpctl)
|
||||
{
|
||||
unsigned long flags;
|
||||
unsigned long vpflags;
|
||||
unsigned long mvpconf0;
|
||||
int nvpe;
|
||||
int ntc;
|
||||
int i;
|
||||
int tc;
|
||||
unsigned long haltval;
|
||||
unsigned long tcstatval;
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_soft_dump(void);
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
|
||||
local_irq_save(flags);
|
||||
vpflags = dvpe();
|
||||
printk("=== MIPS MT State Dump ===\n");
|
||||
printk("-- Global State --\n");
|
||||
printk(" MVPControl Passed: %08lx\n", mvpctl);
|
||||
printk(" MVPControl Read: %08lx\n", vpflags);
|
||||
printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
|
||||
nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
|
||||
ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
|
||||
printk("-- per-VPE State --\n");
|
||||
for(i = 0; i < nvpe; i++) {
|
||||
for(tc = 0; tc < ntc; tc++) {
|
||||
settc(tc);
|
||||
if((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
|
||||
printk(" VPE %d\n", i);
|
||||
printk(" VPEControl : %08lx\n", read_vpe_c0_vpecontrol());
|
||||
printk(" VPEConf0 : %08lx\n", read_vpe_c0_vpeconf0());
|
||||
printk(" VPE%d.Status : %08lx\n",
|
||||
i, read_vpe_c0_status());
|
||||
printk(" VPE%d.EPC : %08lx\n", i, read_vpe_c0_epc());
|
||||
printk(" VPE%d.Cause : %08lx\n", i, read_vpe_c0_cause());
|
||||
printk(" VPE%d.Config7 : %08lx\n",
|
||||
i, read_vpe_c0_config7());
|
||||
break; /* Next VPE */
|
||||
}
|
||||
}
|
||||
}
|
||||
printk("-- per-TC State --\n");
|
||||
for(tc = 0; tc < ntc; tc++) {
|
||||
settc(tc);
|
||||
if(read_tc_c0_tcbind() == read_c0_tcbind()) {
|
||||
/* Are we dumping ourself? */
|
||||
haltval = 0; /* Then we're not halted, and mustn't be */
|
||||
tcstatval = flags; /* And pre-dump TCStatus is flags */
|
||||
printk(" TC %d (current TC with VPE EPC above)\n", tc);
|
||||
} else {
|
||||
haltval = read_tc_c0_tchalt();
|
||||
write_tc_c0_tchalt(1);
|
||||
tcstatval = read_tc_c0_tcstatus();
|
||||
printk(" TC %d\n", tc);
|
||||
}
|
||||
printk(" TCStatus : %08lx\n", tcstatval);
|
||||
printk(" TCBind : %08lx\n", read_tc_c0_tcbind());
|
||||
printk(" TCRestart : %08lx\n", read_tc_c0_tcrestart());
|
||||
printk(" TCHalt : %08lx\n", haltval);
|
||||
printk(" TCContext : %08lx\n", read_tc_c0_tccontext());
|
||||
if (!haltval)
|
||||
write_tc_c0_tchalt(0);
|
||||
}
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
	smtc_soft_dump();
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
printk("===========================\n");
|
||||
evpe(vpflags);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
static int mt_opt_norps = 0;
|
||||
static int mt_opt_rpsctl = -1;
|
||||
static int mt_opt_nblsu = -1;
|
||||
static int mt_opt_forceconfig7 = 0;
|
||||
static int mt_opt_config7 = -1;
|
||||
|
||||
static int __init rps_disable(char *s)
|
||||
{
|
||||
mt_opt_norps = 1;
|
||||
return 1;
|
||||
}
|
||||
__setup("norps", rps_disable);
|
||||
|
||||
static int __init rpsctl_set(char *str)
|
||||
{
|
||||
get_option(&str, &mt_opt_rpsctl);
|
||||
return 1;
|
||||
}
|
||||
__setup("rpsctl=", rpsctl_set);
|
||||
|
||||
static int __init nblsu_set(char *str)
|
||||
{
|
||||
get_option(&str, &mt_opt_nblsu);
|
||||
return 1;
|
||||
}
|
||||
__setup("nblsu=", nblsu_set);
|
||||
|
||||
static int __init config7_set(char *str)
|
||||
{
|
||||
get_option(&str, &mt_opt_config7);
|
||||
mt_opt_forceconfig7 = 1;
|
||||
return 1;
|
||||
}
|
||||
__setup("config7=", config7_set);
|
||||
|
||||
/* Experimental cache flush control parameters that should go away some day */
|
||||
int mt_protiflush = 0;
|
||||
int mt_protdflush = 0;
|
||||
int mt_n_iflushes = 1;
|
||||
int mt_n_dflushes = 1;
|
||||
|
||||
static int __init set_protiflush(char *s)
|
||||
{
|
||||
mt_protiflush = 1;
|
||||
return 1;
|
||||
}
|
||||
__setup("protiflush", set_protiflush);
|
||||
|
||||
static int __init set_protdflush(char *s)
|
||||
{
|
||||
mt_protdflush = 1;
|
||||
return 1;
|
||||
}
|
||||
__setup("protdflush", set_protdflush);
|
||||
|
||||
static int __init niflush(char *s)
|
||||
{
|
||||
get_option(&s, &mt_n_iflushes);
|
||||
return 1;
|
||||
}
|
||||
__setup("niflush=", niflush);
|
||||
|
||||
static int __init ndflush(char *s)
|
||||
{
|
||||
get_option(&s, &mt_n_dflushes);
|
||||
return 1;
|
||||
}
|
||||
__setup("ndflush=", ndflush);
|
||||
#ifdef CONFIG_MIPS_MT_FPAFF
|
||||
static int fpaff_threshold = -1;
|
||||
|
||||
static int __init fpaff_thresh(char *str)
|
||||
{
|
||||
get_option(&str, &fpaff_threshold);
|
||||
return 1;
|
||||
}
|
||||
|
||||
__setup("fpaff=", fpaff_thresh);
|
||||
#endif /* CONFIG_MIPS_MT_FPAFF */
|
||||
|
||||
static unsigned int itc_base = 0;
|
||||
|
||||
static int __init set_itc_base(char *str)
|
||||
{
|
||||
get_option(&str, &itc_base);
|
||||
return 1;
|
||||
}
|
||||
|
||||
__setup("itcbase=", set_itc_base);
|
||||
|
||||
void mips_mt_set_cpuoptions(void)
|
||||
{
|
||||
unsigned int oconfig7 = read_c0_config7();
|
||||
unsigned int nconfig7 = oconfig7;
|
||||
|
||||
	if (mt_opt_norps) {
		printk("\"norps\" option deprecated: use \"rpsctl=\"\n");
	}
|
||||
if (mt_opt_rpsctl >= 0) {
|
||||
printk("34K return prediction stack override set to %d.\n",
|
||||
mt_opt_rpsctl);
|
||||
if (mt_opt_rpsctl)
|
||||
nconfig7 |= (1 << 2);
|
||||
else
|
||||
nconfig7 &= ~(1 << 2);
|
||||
}
|
||||
if (mt_opt_nblsu >= 0) {
|
||||
printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu);
|
||||
if (mt_opt_nblsu)
|
||||
nconfig7 |= (1 << 5);
|
||||
else
|
||||
nconfig7 &= ~(1 << 5);
|
||||
}
|
||||
if (mt_opt_forceconfig7) {
|
||||
printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7);
|
||||
nconfig7 = mt_opt_config7;
|
||||
}
|
||||
if (oconfig7 != nconfig7) {
|
||||
__asm__ __volatile("sync");
|
||||
write_c0_config7(nconfig7);
|
||||
ehb ();
|
||||
printk("Config7: 0x%08x\n", read_c0_config7());
|
||||
}
|
||||
|
||||
/* Report Cache management debug options */
|
||||
if (mt_protiflush)
|
||||
printk("I-cache flushes single-threaded\n");
|
||||
if (mt_protdflush)
|
||||
printk("D-cache flushes single-threaded\n");
|
||||
if (mt_n_iflushes != 1)
|
||||
printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes);
|
||||
if (mt_n_dflushes != 1)
|
||||
printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);
|
||||
|
||||
#ifdef CONFIG_MIPS_MT_FPAFF
	/* FPU Use Factor empirically derived from experiments on 34K */
#define FPUSEFACTOR 333

	if (fpaff_threshold >= 0) {
		mt_fpemul_threshold = fpaff_threshold;
	} else {
		mt_fpemul_threshold =
			(FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
	}
	printk("FPU Affinity set after %ld emulations\n",
		mt_fpemul_threshold);
#endif /* CONFIG_MIPS_MT_FPAFF */
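As a rough worked example of the default above (the numbers are illustrative assumptions, not taken from this diff): with HZ = 100 and loops_per_jiffy around 500000, the expression evaluates to (333 * (500000 / 5000)) / 100, i.e. roughly 333 FPU emulation traps before a task is treated as FPU-bound.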
|
||||
|
||||
if (itc_base != 0) {
|
||||
/*
|
||||
* Configure ITC mapping. This code is very
|
||||
* specific to the 34K core family, which uses
|
||||
* a special mode bit ("ITC") in the ErrCtl
|
||||
* register to enable access to ITC control
|
||||
* registers via cache "tag" operations.
|
||||
*/
|
||||
unsigned long ectlval;
|
||||
unsigned long itcblkgrn;
|
||||
|
||||
/* ErrCtl register is known as "ecc" to Linux */
|
||||
ectlval = read_c0_ecc();
|
||||
write_c0_ecc(ectlval | (0x1 << 26));
|
||||
ehb();
|
||||
#define INDEX_0 (0x80000000)
|
||||
#define INDEX_8 (0x80000008)
|
||||
/* Read "cache tag" for Dcache pseudo-index 8 */
|
||||
cache_op(Index_Load_Tag_D, INDEX_8);
|
||||
ehb();
|
||||
itcblkgrn = read_c0_dtaglo();
|
||||
itcblkgrn &= 0xfffe0000;
|
||||
/* Set for 128 byte pitch of ITC cells */
|
||||
itcblkgrn |= 0x00000c00;
|
||||
/* Stage in Tag register */
|
||||
write_c0_dtaglo(itcblkgrn);
|
||||
ehb();
|
||||
/* Write out to ITU with CACHE op */
|
||||
cache_op(Index_Store_Tag_D, INDEX_8);
|
||||
/* Now set base address, and turn ITC on with 0x1 bit */
|
||||
write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1 );
|
||||
ehb();
|
||||
/* Write out to ITU with CACHE op */
|
||||
cache_op(Index_Store_Tag_D, INDEX_0);
|
||||
write_c0_ecc(ectlval);
|
||||
ehb();
|
||||
printk("Mapped %ld ITC cells starting at 0x%08x\n",
|
||||
((itcblkgrn & 0x7fe00000) >> 20), itc_base);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Function to protect cache flushes from concurrent execution
|
||||
* depends on MP software model chosen.
|
||||
*/
|
||||
|
||||
void mt_cflush_lockdown(void)
|
||||
{
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
void smtc_cflush_lockdown(void);
|
||||
|
||||
smtc_cflush_lockdown();
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
/* FILL IN VSMP and AP/SP VERSIONS HERE */
|
||||
}
|
||||
|
||||
void mt_cflush_release(void)
|
||||
{
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
void smtc_cflush_release(void);
|
||||
|
||||
smtc_cflush_release();
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
/* FILL IN VSMP and AP/SP VERSIONS HERE */
|
||||
}
|
|
@ -28,21 +28,9 @@ extern long __strnlen_user_asm(const char *s);
|
|||
/*
|
||||
* String functions
|
||||
*/
|
||||
EXPORT_SYMBOL(memchr);
|
||||
EXPORT_SYMBOL(memcmp);
|
||||
EXPORT_SYMBOL(memset);
|
||||
EXPORT_SYMBOL(memcpy);
|
||||
EXPORT_SYMBOL(memmove);
|
||||
EXPORT_SYMBOL(strcat);
|
||||
EXPORT_SYMBOL(strchr);
|
||||
#ifdef CONFIG_64BIT
|
||||
EXPORT_SYMBOL(strncmp);
|
||||
#endif
|
||||
EXPORT_SYMBOL(strlen);
|
||||
EXPORT_SYMBOL(strncat);
|
||||
EXPORT_SYMBOL(strnlen);
|
||||
EXPORT_SYMBOL(strrchr);
|
||||
EXPORT_SYMBOL(strstr);
|
||||
|
||||
EXPORT_SYMBOL(kernel_thread);
|
||||
|
||||
|
@ -61,6 +49,3 @@ EXPORT_SYMBOL(__strnlen_user_asm);
|
|||
EXPORT_SYMBOL(csum_partial);
|
||||
|
||||
EXPORT_SYMBOL(invalid_pte_table);
|
||||
#ifdef CONFIG_GENERIC_IRQ_PROBE
|
||||
EXPORT_SYMBOL(probe_irq_mask);
|
||||
#endif
|
||||
|
|
|
@ -41,6 +41,10 @@
|
|||
#include <asm/elf.h>
|
||||
#include <asm/isadep.h>
|
||||
#include <asm/inst.h>
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
#include <asm/mipsmtregs.h>
|
||||
extern void smtc_idle_loop_hook(void);
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
|
||||
/*
|
||||
* The idle thread. There's no useful work to be done, so just try to conserve
|
||||
|
@ -51,9 +55,13 @@ ATTRIB_NORET void cpu_idle(void)
|
|||
{
|
||||
/* endless idle loop with no priority at all */
|
||||
while (1) {
|
||||
while (!need_resched())
|
||||
while (!need_resched()) {
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
smtc_idle_loop_hook();
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
if (cpu_wait)
|
||||
(*cpu_wait)();
|
||||
}
|
||||
preempt_enable_no_resched();
|
||||
schedule();
|
||||
preempt_disable();
|
||||
|
@ -177,6 +185,17 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
|
|||
childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
|
||||
clear_tsk_thread_flag(p, TIF_USEDFPU);
|
||||
|
||||
#ifdef CONFIG_MIPS_MT_FPAFF
|
||||
/*
|
||||
* FPU affinity support is cleaner if we track the
|
||||
* user-visible CPU affinity from the very beginning.
|
||||
* The generic cpus_allowed mask will already have
|
||||
* been copied from the parent before copy_thread
|
||||
* is invoked.
|
||||
*/
|
||||
p->thread.user_cpus_allowed = p->cpus_allowed;
|
||||
#endif /* CONFIG_MIPS_MT_FPAFF */
|
||||
|
||||
if (clone_flags & CLONE_SETTLS)
|
||||
ti->tp_value = regs->regs[7];
|
||||
|
||||
|
|
|
@ -248,10 +248,20 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
|||
break;
|
||||
case FPC_EIR: { /* implementation / version register */
|
||||
unsigned int flags;
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
unsigned int irqflags;
|
||||
unsigned int mtflags;
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
|
||||
if (!cpu_has_fpu)
|
||||
break;
|
||||
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
/* Read-modify-write of Status must be atomic */
|
||||
local_irq_save(irqflags);
|
||||
mtflags = dmt();
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
|
||||
preempt_disable();
|
||||
if (cpu_has_mipsmt) {
|
||||
unsigned int vpflags = dvpe();
|
||||
|
@ -266,6 +276,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
|||
__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
|
||||
write_c0_status(flags);
|
||||
}
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
emt(mtflags);
|
||||
local_irq_restore(irqflags);
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
preempt_enable();
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -173,12 +173,22 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
|
|||
break;
|
||||
case FPC_EIR: { /* implementation / version register */
|
||||
unsigned int flags;
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
unsigned int irqflags;
|
||||
unsigned int mtflags;
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
|
||||
if (!cpu_has_fpu) {
|
||||
tmp = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
/* Read-modify-write of Status must be atomic */
|
||||
local_irq_save(irqflags);
|
||||
mtflags = dmt();
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
|
||||
preempt_disable();
|
||||
if (cpu_has_mipsmt) {
|
||||
unsigned int vpflags = dvpe();
|
||||
|
@ -193,6 +203,10 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
|
|||
__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
|
||||
write_c0_status(flags);
|
||||
}
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
emt(mtflags);
|
||||
local_irq_restore(irqflags);
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
preempt_enable();
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -88,7 +88,18 @@
|
|||
|
||||
PTR_ADDIU t0, $28, _THREAD_SIZE - 32
|
||||
set_saved_sp t0, t1, t2
|
||||
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
/* Read-modify-writes of Status must be atomic on a VPE */
|
||||
mfc0 t2, CP0_TCSTATUS
|
||||
ori t1, t2, TCSTATUS_IXMT
|
||||
mtc0 t1, CP0_TCSTATUS
|
||||
andi t2, t2, TCSTATUS_IXMT
|
||||
ehb
|
||||
DMT 8 # dmt t0
|
||||
move t1,ra
|
||||
jal mips_ihb
|
||||
move ra,t1
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
mfc0 t1, CP0_STATUS /* Do we really need this? */
|
||||
li a3, 0xff01
|
||||
and t1, a3
|
||||
|
@ -97,6 +108,18 @@
|
|||
and a2, a3
|
||||
or a2, t1
|
||||
mtc0 a2, CP0_STATUS
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
ehb
|
||||
andi t0, t0, VPECONTROL_TE
|
||||
beqz t0, 1f
|
||||
emt
|
||||
1:
|
||||
mfc0 t1, CP0_TCSTATUS
|
||||
xori t1, t1, TCSTATUS_IXMT
|
||||
or t1, t1, t2
|
||||
mtc0 t1, CP0_TCSTATUS
|
||||
ehb
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
move v0, a0
|
||||
jr ra
|
||||
END(resume)
|
||||
|
@ -131,10 +154,19 @@ LEAF(_restore_fp)
|
|||
#define FPU_DEFAULT 0x00000000
|
||||
|
||||
LEAF(_init_fpu)
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
/* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */
|
||||
mfc0 t0, CP0_TCSTATUS
|
||||
/* Bit position is the same for Status, TCStatus */
|
||||
li t1, ST0_CU1
|
||||
or t0, t1
|
||||
mtc0 t0, CP0_TCSTATUS
|
||||
#else /* Normal MIPS CU1 enable */
|
||||
mfc0 t0, CP0_STATUS
|
||||
li t1, ST0_CU1
|
||||
or t0, t1
|
||||
mtc0 t0, CP0_STATUS
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
fpu_enable_hazard
|
||||
|
||||
li t1, FPU_DEFAULT
|
||||
|
|
|
@ -21,45 +21,44 @@
|
|||
#include <linux/module.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/init.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/elf.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/moduleloader.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/poll.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/wait.h>
|
||||
|
||||
#include <asm/mipsmtregs.h>
|
||||
#include <asm/bitops.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/atomic.h>
|
||||
#include <asm/cpu.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/vpe.h>
|
||||
#include <asm/rtlx.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
#define RTLX_TARG_VPE 1
|
||||
|
||||
static struct rtlx_info *rtlx;
|
||||
static int major;
|
||||
static char module_name[] = "rtlx";
|
||||
static struct irqaction irq;
|
||||
static int irq_num;
|
||||
|
||||
static inline int spacefree(int read, int write, int size)
{
	if (read == write) {
		/*
		 * never fill the buffer completely, so indexes are always
		 * equal if empty and only empty, or !equal if data available
		 */
		return size - 1;
	}

	return ((read + size - write) % size) - 1;
}
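As a quick worked example of the accounting above (values chosen purely for illustration): with size = 8, read = 2 and write = 5, spacefree() returns ((2 + 8 - 5) % 8) - 1 = 4, so four more bytes may be written; reserving one slot keeps read == write unambiguous as the empty state.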
|
||||
|
||||
static struct chan_waitqueues {
|
||||
wait_queue_head_t rt_queue;
|
||||
wait_queue_head_t lx_queue;
|
||||
int in_open;
|
||||
} channel_wqs[RTLX_CHANNELS];
|
||||
|
||||
static struct irqaction irq;
|
||||
static int irq_num;
|
||||
static struct vpe_notifications notify;
|
||||
static int sp_stopping = 0;
|
||||
|
||||
extern void *vpe_get_shared(int index);
|
||||
|
||||
static void rtlx_dispatch(struct pt_regs *regs)
|
||||
|
@ -67,174 +66,298 @@ static void rtlx_dispatch(struct pt_regs *regs)
|
|||
do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ, regs);
|
||||
}
|
||||
|
||||
|
||||
/* Interrupt handler may be called before rtlx_init has otherwise had
|
||||
a chance to run.
|
||||
*/
|
||||
static irqreturn_t rtlx_interrupt(int irq, void *dev_id, struct pt_regs *regs)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < RTLX_CHANNELS; i++) {
|
||||
struct rtlx_channel *chan = &rtlx->channel[i];
|
||||
|
||||
if (chan->lx_read != chan->lx_write)
|
||||
wake_up_interruptible(&channel_wqs[i].lx_queue);
|
||||
wake_up(&channel_wqs[i].lx_queue);
|
||||
wake_up(&channel_wqs[i].rt_queue);
|
||||
}
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
/* call when we have the address of the shared structure from the SP side. */
|
||||
static int rtlx_init(struct rtlx_info *rtlxi)
|
||||
static __attribute_used__ void dump_rtlx(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
printk("id 0x%lx state %d\n", rtlx->id, rtlx->state);
|
||||
|
||||
for (i = 0; i < RTLX_CHANNELS; i++) {
|
||||
struct rtlx_channel *chan = &rtlx->channel[i];
|
||||
|
||||
printk(" rt_state %d lx_state %d buffer_size %d\n",
|
||||
chan->rt_state, chan->lx_state, chan->buffer_size);
|
||||
|
||||
printk(" rt_read %d rt_write %d\n",
|
||||
chan->rt_read, chan->rt_write);
|
||||
|
||||
printk(" lx_read %d lx_write %d\n",
|
||||
chan->lx_read, chan->lx_write);
|
||||
|
||||
printk(" rt_buffer <%s>\n", chan->rt_buffer);
|
||||
printk(" lx_buffer <%s>\n", chan->lx_buffer);
|
||||
}
|
||||
}
|
||||
|
||||
/* call when we have the address of the shared structure from the SP side. */
|
||||
static int rtlx_init(struct rtlx_info *rtlxi)
|
||||
{
|
||||
if (rtlxi->id != RTLX_ID) {
|
||||
printk(KERN_WARNING "no valid RTLX id at 0x%p\n", rtlxi);
|
||||
printk(KERN_ERR "no valid RTLX id at 0x%p 0x%x\n", rtlxi, rtlxi->id);
|
||||
return -ENOEXEC;
|
||||
}
|
||||
|
||||
/* initialise the wait queues */
|
||||
for (i = 0; i < RTLX_CHANNELS; i++) {
|
||||
init_waitqueue_head(&channel_wqs[i].rt_queue);
|
||||
init_waitqueue_head(&channel_wqs[i].lx_queue);
|
||||
}
|
||||
|
||||
/* set up for interrupt handling */
|
||||
memset(&irq, 0, sizeof(struct irqaction));
|
||||
|
||||
if (cpu_has_vint)
|
||||
set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch);
|
||||
|
||||
irq_num = MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ;
|
||||
irq.handler = rtlx_interrupt;
|
||||
irq.flags = SA_INTERRUPT;
|
||||
irq.name = "RTLX";
|
||||
irq.dev_id = rtlx;
|
||||
setup_irq(irq_num, &irq);
|
||||
|
||||
rtlx = rtlxi;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* only allow one open process at a time to open each channel */
|
||||
static int rtlx_open(struct inode *inode, struct file *filp)
|
||||
/* notifications */
|
||||
static void starting(int vpe)
|
||||
{
|
||||
int minor, ret;
|
||||
struct rtlx_channel *chan;
|
||||
int i;
|
||||
sp_stopping = 0;
|
||||
|
||||
/* assume only 1 device at the mo. */
|
||||
minor = MINOR(inode->i_rdev);
|
||||
/* force a reload of rtlx */
|
||||
rtlx=NULL;
|
||||
|
||||
/* wake up any sleeping rtlx_open's */
|
||||
for (i = 0; i < RTLX_CHANNELS; i++)
|
||||
wake_up_interruptible(&channel_wqs[i].lx_queue);
|
||||
}
|
||||
|
||||
static void stopping(int vpe)
|
||||
{
|
||||
int i;
|
||||
|
||||
sp_stopping = 1;
|
||||
for (i = 0; i < RTLX_CHANNELS; i++)
|
||||
wake_up_interruptible(&channel_wqs[i].lx_queue);
|
||||
}
|
||||
|
||||
|
||||
int rtlx_open(int index, int can_sleep)
|
||||
{
|
||||
int ret;
|
||||
struct rtlx_channel *chan;
|
||||
volatile struct rtlx_info **p;
|
||||
|
||||
if (index >= RTLX_CHANNELS) {
|
||||
printk(KERN_DEBUG "rtlx_open index out of range\n");
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
if (channel_wqs[index].in_open) {
|
||||
printk(KERN_DEBUG "rtlx_open channel %d already opened\n", index);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
channel_wqs[index].in_open++;
|
||||
|
||||
if (rtlx == NULL) {
|
||||
struct rtlx_info **p;
|
||||
if( (p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) {
|
||||
printk(KERN_ERR "vpe_get_shared is NULL. "
|
||||
"Has an SP program been loaded?\n");
|
||||
return -EFAULT;
|
||||
if (can_sleep) {
|
||||
DECLARE_WAITQUEUE(wait, current);
|
||||
|
||||
/* go to sleep */
|
||||
add_wait_queue(&channel_wqs[index].lx_queue, &wait);
|
||||
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
while ((p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) {
|
||||
schedule();
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
}
|
||||
|
||||
set_current_state(TASK_RUNNING);
|
||||
remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
|
||||
|
||||
/* back running */
|
||||
} else {
|
||||
printk( KERN_DEBUG "No SP program loaded, and device "
|
||||
"opened with O_NONBLOCK\n");
|
||||
channel_wqs[index].in_open = 0;
|
||||
return -ENOSYS;
|
||||
}
|
||||
}
|
||||
|
||||
if (*p == NULL) {
|
||||
printk(KERN_ERR "vpe_shared %p %p\n", p, *p);
|
||||
return -EFAULT;
|
||||
if (can_sleep) {
|
||||
DECLARE_WAITQUEUE(wait, current);
|
||||
|
||||
/* go to sleep */
|
||||
add_wait_queue(&channel_wqs[index].lx_queue, &wait);
|
||||
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
while (*p == NULL) {
|
||||
schedule();
|
||||
|
||||
				/* Reset the task state to interruptible, otherwise
				   we would spin round this loop burning CPU time;
				   schedule() appears to return with the state set
				   to TASK_RUNNING.

				   If the loaded SP program, for whatever reason,
				   never sets up the shared structure, *p will never
				   become non-NULL, so whoever opened /dev/rt? (or
				   kspd) will then consume rather a lot of processor
				   cycles. */
|
||||
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
}
|
||||
|
||||
set_current_state(TASK_RUNNING);
|
||||
remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
|
||||
|
||||
/* back running */
|
||||
}
|
||||
else {
|
||||
printk(" *vpe_get_shared is NULL. "
|
||||
"Has an SP program been loaded?\n");
|
||||
channel_wqs[index].in_open = 0;
|
||||
return -ENOSYS;
|
||||
}
|
||||
}
|
||||
|
||||
if ((ret = rtlx_init(*p)) < 0)
|
||||
return ret;
|
||||
if ((unsigned int)*p < KSEG0) {
|
||||
printk(KERN_WARNING "vpe_get_shared returned an invalid pointer "
|
||||
"maybe an error code %d\n", (int)*p);
|
||||
channel_wqs[index].in_open = 0;
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
if ((ret = rtlx_init(*p)) < 0) {
|
||||
channel_wqs[index].in_open = 0;
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
chan = &rtlx->channel[minor];
|
||||
chan = &rtlx->channel[index];
|
||||
|
||||
if (test_and_set_bit(RTLX_STATE_OPENED, &chan->lx_state))
|
||||
return -EBUSY;
|
||||
if (chan->lx_state == RTLX_STATE_OPENED) {
|
||||
channel_wqs[index].in_open = 0;
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
chan->lx_state = RTLX_STATE_OPENED;
|
||||
channel_wqs[index].in_open = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rtlx_release(struct inode *inode, struct file *filp)
|
||||
int rtlx_release(int index)
|
||||
{
|
||||
int minor = MINOR(inode->i_rdev);
|
||||
|
||||
clear_bit(RTLX_STATE_OPENED, &rtlx->channel[minor].lx_state);
|
||||
smp_mb__after_clear_bit();
|
||||
|
||||
rtlx->channel[index].lx_state = RTLX_STATE_UNUSED;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned int rtlx_poll(struct file *file, poll_table * wait)
|
||||
unsigned int rtlx_read_poll(int index, int can_sleep)
|
||||
{
|
||||
int minor;
|
||||
unsigned int mask = 0;
|
||||
struct rtlx_channel *chan;
|
||||
struct rtlx_channel *chan;
|
||||
|
||||
minor = MINOR(file->f_dentry->d_inode->i_rdev);
|
||||
chan = &rtlx->channel[minor];
|
||||
if (rtlx == NULL)
|
||||
return 0;
|
||||
|
||||
poll_wait(file, &channel_wqs[minor].rt_queue, wait);
|
||||
poll_wait(file, &channel_wqs[minor].lx_queue, wait);
|
||||
chan = &rtlx->channel[index];
|
||||
|
||||
/* data available to read? */
|
||||
if (chan->lx_read != chan->lx_write)
|
||||
mask |= POLLIN | POLLRDNORM;
|
||||
if (chan->lx_read == chan->lx_write) {
|
||||
if (can_sleep) {
|
||||
DECLARE_WAITQUEUE(wait, current);
|
||||
|
||||
/* space to write */
|
||||
if (spacefree(chan->rt_read, chan->rt_write, chan->buffer_size))
|
||||
mask |= POLLOUT | POLLWRNORM;
|
||||
/* go to sleep */
|
||||
add_wait_queue(&channel_wqs[index].lx_queue, &wait);
|
||||
|
||||
return mask;
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
while (chan->lx_read == chan->lx_write) {
|
||||
schedule();
|
||||
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
|
||||
if (sp_stopping) {
|
||||
set_current_state(TASK_RUNNING);
|
||||
remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
set_current_state(TASK_RUNNING);
|
||||
remove_wait_queue(&channel_wqs[index].lx_queue, &wait);
|
||||
|
||||
/* back running */
|
||||
}
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
return (chan->lx_write + chan->buffer_size - chan->lx_read)
|
||||
% chan->buffer_size;
|
||||
}
|
||||
|
||||
static ssize_t rtlx_read(struct file *file, char __user * buffer, size_t count,
|
||||
loff_t * ppos)
|
||||
static inline int write_spacefree(int read, int write, int size)
|
||||
{
|
||||
unsigned long failed;
|
||||
size_t fl = 0L;
|
||||
int minor;
|
||||
struct rtlx_channel *lx;
|
||||
DECLARE_WAITQUEUE(wait, current);
|
||||
|
||||
minor = MINOR(file->f_dentry->d_inode->i_rdev);
|
||||
lx = &rtlx->channel[minor];
|
||||
|
||||
/* data available? */
|
||||
if (lx->lx_write == lx->lx_read) {
|
||||
if (file->f_flags & O_NONBLOCK)
|
||||
return 0; /* -EAGAIN makes cat whinge */
|
||||
|
||||
/* go to sleep */
|
||||
add_wait_queue(&channel_wqs[minor].lx_queue, &wait);
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
|
||||
while (lx->lx_write == lx->lx_read)
|
||||
schedule();
|
||||
|
||||
set_current_state(TASK_RUNNING);
|
||||
remove_wait_queue(&channel_wqs[minor].lx_queue, &wait);
|
||||
|
||||
/* back running */
|
||||
if (read == write) {
|
||||
/*
|
||||
* Never fill the buffer completely, so indexes are always
|
||||
* equal if empty and only empty, or !equal if data available
|
||||
*/
|
||||
return size - 1;
|
||||
}
|
||||
|
||||
return ((read + size - write) % size) - 1;
|
||||
}
|
||||
|
||||
unsigned int rtlx_write_poll(int index)
|
||||
{
|
||||
struct rtlx_channel *chan = &rtlx->channel[index];
|
||||
return write_spacefree(chan->rt_read, chan->rt_write, chan->buffer_size);
|
||||
}
|
||||
|
||||
static inline void copy_to(void *dst, void *src, size_t count, int user)
|
||||
{
|
||||
if (user)
|
||||
copy_to_user(dst, src, count);
|
||||
else
|
||||
memcpy(dst, src, count);
|
||||
}
|
||||
|
||||
static inline void copy_from(void *dst, void *src, size_t count, int user)
|
||||
{
|
||||
if (user)
|
||||
copy_from_user(dst, src, count);
|
||||
else
|
||||
memcpy(dst, src, count);
|
||||
}
|
||||
|
||||
ssize_t rtlx_read(int index, void *buff, size_t count, int user)
|
||||
{
|
||||
size_t fl = 0L;
|
||||
struct rtlx_channel *lx;
|
||||
|
||||
if (rtlx == NULL)
|
||||
return -ENOSYS;
|
||||
|
||||
lx = &rtlx->channel[index];
|
||||
|
||||
/* find out how much in total */
|
||||
count = min(count,
|
||||
(size_t)(lx->lx_write + lx->buffer_size - lx->lx_read) % lx->buffer_size);
|
||||
(size_t)(lx->lx_write + lx->buffer_size - lx->lx_read)
|
||||
% lx->buffer_size);
|
||||
|
||||
/* then how much from the read pointer onwards */
|
||||
fl = min(count, (size_t)lx->buffer_size - lx->lx_read);
|
||||
fl = min( count, (size_t)lx->buffer_size - lx->lx_read);
|
||||
|
||||
failed = copy_to_user (buffer, &lx->lx_buffer[lx->lx_read], fl);
|
||||
if (failed) {
|
||||
count = fl - failed;
|
||||
goto out;
|
||||
}
|
||||
copy_to(buff, &lx->lx_buffer[lx->lx_read], fl, user);
|
||||
|
||||
/* and if there is anything left at the beginning of the buffer */
|
||||
if (count - fl) {
|
||||
failed = copy_to_user (buffer + fl, lx->lx_buffer, count - fl);
|
||||
if (failed) {
|
||||
count -= failed;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
if ( count - fl )
|
||||
copy_to (buff + fl, lx->lx_buffer, count - fl, user);
|
||||
|
||||
out:
|
||||
/* update the index */
|
||||
lx->lx_read += count;
|
||||
lx->lx_read %= lx->buffer_size;
|
||||
|
@ -242,20 +365,101 @@ out:
|
|||
return count;
|
||||
}
|
||||
|
||||
static ssize_t rtlx_write(struct file *file, const char __user * buffer,
|
||||
size_t count, loff_t * ppos)
|
||||
ssize_t rtlx_write(int index, void *buffer, size_t count, int user)
|
||||
{
|
||||
unsigned long failed;
|
||||
int minor;
|
||||
struct rtlx_channel *rt;
|
||||
size_t fl;
|
||||
|
||||
if (rtlx == NULL)
|
||||
return(-ENOSYS);
|
||||
|
||||
rt = &rtlx->channel[index];
|
||||
|
||||
/* total number of bytes to copy */
|
||||
count = min(count,
|
||||
(size_t)write_spacefree(rt->rt_read, rt->rt_write,
|
||||
rt->buffer_size));
|
||||
|
||||
/* first bit from write pointer to the end of the buffer, or count */
|
||||
fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
|
||||
|
||||
copy_from (&rt->rt_buffer[rt->rt_write], buffer, fl, user);
|
||||
|
||||
/* if there's any left copy to the beginning of the buffer */
|
||||
if( count - fl )
|
||||
copy_from (rt->rt_buffer, buffer + fl, count - fl, user);
|
||||
|
||||
rt->rt_write += count;
|
||||
rt->rt_write %= rt->buffer_size;
|
||||
|
||||
return(count);
|
||||
}
|
||||
|
||||
|
||||
static int file_open(struct inode *inode, struct file *filp)
|
||||
{
|
||||
int minor = MINOR(inode->i_rdev);
|
||||
|
||||
return rtlx_open(minor, (filp->f_flags & O_NONBLOCK) ? 0 : 1);
|
||||
}
|
||||
|
||||
static int file_release(struct inode *inode, struct file *filp)
|
||||
{
|
||||
int minor;
|
||||
minor = MINOR(inode->i_rdev);
|
||||
|
||||
return rtlx_release(minor);
|
||||
}
|
||||
|
||||
static unsigned int file_poll(struct file *file, poll_table * wait)
|
||||
{
|
||||
int minor;
|
||||
unsigned int mask = 0;
|
||||
|
||||
minor = MINOR(file->f_dentry->d_inode->i_rdev);
|
||||
|
||||
poll_wait(file, &channel_wqs[minor].rt_queue, wait);
|
||||
poll_wait(file, &channel_wqs[minor].lx_queue, wait);
|
||||
|
||||
if (rtlx == NULL)
|
||||
return 0;
|
||||
|
||||
/* data available to read? */
|
||||
if (rtlx_read_poll(minor, 0))
|
||||
mask |= POLLIN | POLLRDNORM;
|
||||
|
||||
/* space to write */
|
||||
if (rtlx_write_poll(minor))
|
||||
mask |= POLLOUT | POLLWRNORM;
|
||||
|
||||
return mask;
|
||||
}
|
||||
|
||||
static ssize_t file_read(struct file *file, char __user * buffer, size_t count,
|
||||
loff_t * ppos)
|
||||
{
|
||||
int minor = MINOR(file->f_dentry->d_inode->i_rdev);
|
||||
|
||||
/* data available? */
|
||||
if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1)) {
|
||||
return 0; // -EAGAIN makes cat whinge
|
||||
}
|
||||
|
||||
return rtlx_read(minor, buffer, count, 1);
|
||||
}
|
||||
|
||||
static ssize_t file_write(struct file *file, const char __user * buffer,
|
||||
size_t count, loff_t * ppos)
|
||||
{
|
||||
int minor;
|
||||
struct rtlx_channel *rt;
|
||||
DECLARE_WAITQUEUE(wait, current);
|
||||
|
||||
minor = MINOR(file->f_dentry->d_inode->i_rdev);
|
||||
rt = &rtlx->channel[minor];
|
||||
|
||||
/* any space left... */
|
||||
if (!spacefree(rt->rt_read, rt->rt_write, rt->buffer_size)) {
|
||||
if (!rtlx_write_poll(minor)) {
|
||||
|
||||
if (file->f_flags & O_NONBLOCK)
|
||||
return -EAGAIN;
|
||||
|
@ -263,61 +467,64 @@ static ssize_t rtlx_write(struct file *file, const char __user * buffer,
|
|||
add_wait_queue(&channel_wqs[minor].rt_queue, &wait);
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
|
||||
while (!spacefree(rt->rt_read, rt->rt_write, rt->buffer_size))
|
||||
while (!rtlx_write_poll(minor))
|
||||
schedule();
|
||||
|
||||
set_current_state(TASK_RUNNING);
|
||||
remove_wait_queue(&channel_wqs[minor].rt_queue, &wait);
|
||||
}
|
||||
|
||||
/* total number of bytes to copy */
|
||||
count = min(count, (size_t)spacefree(rt->rt_read, rt->rt_write, rt->buffer_size) );
|
||||
|
||||
/* first bit from write pointer to the end of the buffer, or count */
|
||||
fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
|
||||
|
||||
failed = copy_from_user(&rt->rt_buffer[rt->rt_write], buffer, fl);
|
||||
if (failed) {
|
||||
count = fl - failed;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* if there's any left copy to the beginning of the buffer */
|
||||
if (count - fl) {
|
||||
failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
|
||||
if (failed) {
|
||||
count -= failed;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
rt->rt_write += count;
|
||||
rt->rt_write %= rt->buffer_size;
|
||||
|
||||
return count;
|
||||
return rtlx_write(minor, (void *)buffer, count, 1);
|
||||
}
|
||||
|
||||
static struct file_operations rtlx_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = rtlx_open,
|
||||
.release = rtlx_release,
|
||||
.write = rtlx_write,
|
||||
.read = rtlx_read,
|
||||
.poll = rtlx_poll
|
||||
.owner = THIS_MODULE,
|
||||
.open = file_open,
|
||||
.release = file_release,
|
||||
.write = file_write,
|
||||
.read = file_read,
|
||||
.poll = file_poll
|
||||
};
|
||||
|
||||
static struct irqaction rtlx_irq = {
|
||||
.handler = rtlx_interrupt,
|
||||
.flags = SA_INTERRUPT,
|
||||
.name = "RTLX",
|
||||
};
|
||||
|
||||
static int rtlx_irq_num = MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ;
|
||||
|
||||
static char register_chrdev_failed[] __initdata =
|
||||
KERN_ERR "rtlx_module_init: unable to register device\n";
|
||||
|
||||
static int __init rtlx_module_init(void)
|
||||
static int rtlx_module_init(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
major = register_chrdev(0, module_name, &rtlx_fops);
|
||||
if (major < 0) {
|
||||
printk(register_chrdev_failed);
|
||||
return major;
|
||||
}
|
||||
|
||||
/* initialise the wait queues */
|
||||
for (i = 0; i < RTLX_CHANNELS; i++) {
|
||||
init_waitqueue_head(&channel_wqs[i].rt_queue);
|
||||
init_waitqueue_head(&channel_wqs[i].lx_queue);
|
||||
channel_wqs[i].in_open = 0;
|
||||
}
|
||||
|
||||
/* set up notifiers */
|
||||
notify.start = starting;
|
||||
notify.stop = stopping;
|
||||
vpe_notify(RTLX_TARG_VPE, ¬ify);
|
||||
|
||||
if (cpu_has_vint)
|
||||
set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch);
|
||||
|
||||
rtlx_irq.dev_id = rtlx;
|
||||
setup_irq(rtlx_irq_num, &rtlx_irq);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -330,5 +537,5 @@ module_init(rtlx_module_init);
|
|||
module_exit(rtlx_module_exit);
|
||||
|
||||
MODULE_DESCRIPTION("MIPS RTLX");
|
||||
MODULE_AUTHOR("Elizabeth Clarke, MIPS Technologies, Inc.");
|
||||
MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
|
|
@ -569,8 +569,19 @@ einval: li v0, -EINVAL
|
|||
sys sys_tkill 2
|
||||
sys sys_sendfile64 5
|
||||
sys sys_futex 6
|
||||
#ifdef CONFIG_MIPS_MT_FPAFF
|
||||
/*
|
||||
* For FPU affinity scheduling on MIPS MT processors, we need to
|
||||
* intercept sys_sched_xxxaffinity() calls until we get a proper hook
|
||||
* in kernel/sched.c. Considered only temporary we only support these
|
||||
* hooks for the 32-bit kernel - there is no MIPS64 MT processor atm.
|
||||
*/
|
||||
sys mipsmt_sys_sched_setaffinity 3
|
||||
sys mipsmt_sys_sched_getaffinity 3
|
||||
#else
|
||||
sys sys_sched_setaffinity 3
|
||||
sys sys_sched_getaffinity 3 /* 4240 */
|
||||
#endif /* CONFIG_MIPS_MT_FPAFF */
|
||||
sys sys_io_setup 2
|
||||
sys sys_io_destroy 1
|
||||
sys sys_io_getevents 5
|
||||
|
@ -634,6 +645,8 @@ einval: li v0, -EINVAL
|
|||
sys sys_pselect6 6
|
||||
sys sys_ppoll 5
|
||||
sys sys_unshare 1
|
||||
sys sys_splice 4
|
||||
sys sys_sync_file_range 7 /* 4305 */
|
||||
.endm
|
||||
|
||||
/* We pre-compute the number of _instruction_ bytes needed to
|
||||
|
|
|
@ -460,3 +460,5 @@ sys_call_table:
|
|||
PTR sys_pselect6 /* 5260 */
|
||||
PTR sys_ppoll
|
||||
PTR sys_unshare
|
||||
PTR sys_splice
|
||||
PTR sys_sync_file_range
|
||||
|
|
|
@ -386,3 +386,5 @@ EXPORT(sysn32_call_table)
|
|||
PTR sys_pselect6
|
||||
PTR sys_ppoll /* 6265 */
|
||||
PTR sys_unshare
|
||||
PTR sys_splice
|
||||
PTR sys_sync_file_range
|
||||
|
|
|
@ -508,4 +508,6 @@ sys_call_table:
|
|||
PTR sys_pselect6
|
||||
PTR sys_ppoll
|
||||
PTR sys_unshare
|
||||
PTR sys_splice
|
||||
PTR sys32_sync_file_range /* 4305 */
|
||||
.size sys_call_table,.-sys_call_table
|
||||
|
|
|
@ -529,7 +529,10 @@ void __init setup_arch(char **cmdline_p)
|
|||
|
||||
int __init fpu_disable(char *s)
|
||||
{
|
||||
cpu_data[0].options &= ~MIPS_CPU_FPU;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < NR_CPUS; i++)
|
||||
cpu_data[i].options &= ~MIPS_CPU_FPU;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
|
|
@ -1,8 +1,4 @@
|
|||
/*
|
||||
* Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
|
||||
*
|
||||
* Elizabeth Clarke (beth@mips.com)
|
||||
*
|
||||
* This program is free software; you can distribute it and/or modify it
|
||||
* under the terms of the GNU General Public License (Version 2) as
|
||||
* published by the Free Software Foundation.
|
||||
|
@ -16,6 +12,10 @@
|
|||
* with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
|
||||
*
|
||||
* Copyright (C) 2004, 05, 06 MIPS Technologies, Inc.
|
||||
* Elizabeth Clarke (beth@mips.com)
|
||||
* Ralf Baechle (ralf@linux-mips.org)
|
||||
* Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sched.h>
|
||||
|
@ -24,6 +24,7 @@
|
|||
#include <linux/compiler.h>
|
||||
|
||||
#include <asm/atomic.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/cpu.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/system.h>
|
||||
|
@ -33,8 +34,8 @@
|
|||
#include <asm/time.h>
|
||||
#include <asm/mipsregs.h>
|
||||
#include <asm/mipsmtregs.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/mips-boards/maltaint.h>
|
||||
#include <asm/mips_mt.h>
|
||||
#include <asm/mips-boards/maltaint.h> /* This is f*cking wrong */
|
||||
|
||||
#define MIPS_CPU_IPI_RESCHED_IRQ 0
|
||||
#define MIPS_CPU_IPI_CALL_IRQ 1
|
||||
|
@ -66,6 +67,7 @@ void __init sanitize_tlb_entries(void)
|
|||
if (!cpu_has_mipsmt)
|
||||
return;
|
||||
|
||||
/* Enable VPC */
|
||||
set_c0_mvpcontrol(MVPCONTROL_VPC);
|
||||
|
||||
back_to_back_c0_hazard();
|
||||
|
@ -106,12 +108,12 @@ void __init sanitize_tlb_entries(void)
|
|||
|
||||
static void ipi_resched_dispatch (struct pt_regs *regs)
|
||||
{
|
||||
do_IRQ(MIPS_CPU_IPI_RESCHED_IRQ, regs);
|
||||
do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ, regs);
|
||||
}
|
||||
|
||||
static void ipi_call_dispatch (struct pt_regs *regs)
|
||||
{
|
||||
do_IRQ(MIPS_CPU_IPI_CALL_IRQ, regs);
|
||||
do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ, regs);
|
||||
}
|
||||
|
||||
irqreturn_t ipi_resched_interrupt(int irq, void *dev_id, struct pt_regs *regs)
|
||||
|
@ -148,6 +150,11 @@ void plat_smp_setup(void)
|
|||
unsigned long val;
|
||||
int i, num;
|
||||
|
||||
#ifdef CONFIG_MIPS_MT_FPAFF
|
||||
/* If we have an FPU, enroll ourselves in the FPU-full mask */
|
||||
if (cpu_has_fpu)
|
||||
cpu_set(0, mt_fpu_cpumask);
|
||||
#endif /* CONFIG_MIPS_MT_FPAFF */
|
||||
if (!cpu_has_mipsmt)
|
||||
return;
|
||||
|
||||
|
@ -155,6 +162,8 @@ void plat_smp_setup(void)
|
|||
dvpe();
|
||||
dmt();
|
||||
|
||||
mips_mt_set_cpuoptions();
|
||||
|
||||
/* Put MVPE's into 'configuration state' */
|
||||
set_c0_mvpcontrol(MVPCONTROL_VPC);
|
||||
|
||||
|
@ -189,11 +198,13 @@ void plat_smp_setup(void)
|
|||
|
||||
if (i != 0) {
|
||||
write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
|
||||
write_vpe_c0_cause(read_vpe_c0_cause() & ~CAUSEF_IP);
|
||||
|
||||
/* set config to be the same as vpe0, particularly kseg0 coherency alg */
|
||||
write_vpe_c0_config( read_c0_config());
|
||||
|
||||
/* make sure there are no software interrupts pending */
|
||||
write_vpe_c0_cause(read_vpe_c0_cause() & ~(C_SW1|C_SW0));
|
||||
|
||||
/* Propagate Config7 */
|
||||
write_vpe_c0_config7(read_c0_config7());
|
||||
}
|
||||
|
@ -233,16 +244,16 @@ void plat_smp_setup(void)
|
|||
/* We'll wait until starting the secondaries before starting MVPE */
|
||||
|
||||
printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
|
||||
}
|
||||
|
||||
void __init plat_prepare_cpus(unsigned int max_cpus)
|
||||
{
|
||||
/* set up ipi interrupts */
|
||||
if (cpu_has_vint) {
|
||||
set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
|
||||
set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
|
||||
}
|
||||
}
|
||||
|
||||
void __init plat_prepare_cpus(unsigned int max_cpus)
|
||||
{
|
||||
cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
|
||||
cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ;
|
||||
|
||||
|
@ -287,7 +298,8 @@ void prom_boot_secondary(int cpu, struct task_struct *idle)
|
|||
/* global pointer */
|
||||
write_tc_gpr_gp((unsigned long)gp);
|
||||
|
||||
flush_icache_range((unsigned long)gp, (unsigned long)(gp + 1));
|
||||
flush_icache_range((unsigned long)gp,
|
||||
(unsigned long)(gp + sizeof(struct thread_info)));
|
||||
|
||||
/* finally out of configuration and into chaos */
|
||||
clear_c0_mvpcontrol(MVPCONTROL_VPC);
|
||||
|
@ -305,6 +317,12 @@ void prom_smp_finish(void)
|
|||
{
|
||||
write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
|
||||
|
||||
#ifdef CONFIG_MIPS_MT_FPAFF
|
||||
/* If we have an FPU, enroll ourselves in the FPU-full mask */
|
||||
if (cpu_has_fpu)
|
||||
cpu_set(smp_processor_id(), mt_fpu_cpumask);
|
||||
#endif /* CONFIG_MIPS_MT_FPAFF */
|
||||
|
||||
local_irq_enable();
|
||||
}
|
||||
|
|
@ -38,6 +38,10 @@
|
|||
#include <asm/mmu_context.h>
|
||||
#include <asm/smp.h>
|
||||
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
#include <asm/mipsmtregs.h>
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
|
||||
cpumask_t phys_cpu_present_map; /* Bitmask of available CPUs */
|
||||
volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
|
||||
cpumask_t cpu_online_map; /* Bitmask of currently online CPUs */
|
||||
|
@ -85,6 +89,10 @@ asmlinkage void start_secondary(void)
|
|||
{
|
||||
unsigned int cpu;
|
||||
|
||||
#ifdef CONFIG_MIPS_MT_SMTC
|
||||
/* Only do cpu_probe for first TC of CPU */
|
||||
if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
|
||||
#endif /* CONFIG_MIPS_MT_SMTC */
|
||||
cpu_probe();
|
||||
cpu_report();
|
||||
per_cpu_trap_init();
|
||||
|
@ -179,11 +187,13 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
|
|||
if (wait)
|
||||
while (atomic_read(&data.finished) != cpus)
|
||||
barrier();
|
||||
call_data = NULL;
|
||||
spin_unlock(&smp_call_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
void smp_call_function_interrupt(void)
|
||||
{
|
||||
void (*func) (void *info) = call_data->func;
|
||||
|
@ -446,5 +456,3 @@ subsys_initcall(topology_init);
|
|||
|
||||
EXPORT_SYMBOL(flush_tlb_page);
|
||||
EXPORT_SYMBOL(flush_tlb_one);
|
||||
EXPORT_SYMBOL(cpu_data);
|
||||
EXPORT_SYMBOL(synchronize_irq);
|
||||
|
|
|
@ -0,0 +1,130 @@
|
|||
/*
|
||||
* Assembly Language Functions for MIPS MT SMTC support
|
||||
*/
|
||||
|
||||
/*
 * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
 */

#include <asm/regdef.h>
#include <asm/asmmacro.h>
#include <asm/stackframe.h>
|
||||
|
||||
/*
|
||||
* "Software Interrupt" linkage.
|
||||
*
|
||||
 * This is invoked when an "Interrupt" is sent from one TC to another,
 * where the TC to be interrupted is halted, has its Restart address
 * and Status values saved by the "remote control" thread, then modified
 * to cause execution to begin here, in kernel mode. This code then
 * disguises the TC state as that of an exception and transfers
 * control to the general exception or vectored interrupt handler.
 */
|
||||
.set noreorder
|
||||
|
||||
/*
|
||||
The __smtc_ipi_vector would use k0 and k1 as temporaries and
|
||||
1) Set EXL (this is per-VPE, so this can't be done by proxy!)
|
||||
2) Restore the K/CU and IXMT bits to the pre "exception" state
|
||||
(EXL means no interrupts and access to the kernel map).
|
||||
3) Set EPC to be the saved value of TCRestart.
|
||||
4) Jump to the exception handler entry point passed by the sender.
|
||||
|
||||
CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED??
|
||||
*/
|
||||
|
||||
/*
|
||||
 * Reviled and slandered vision: Set EXL and restore K/CU/IXMT
 * state of pre-halt thread, then save everything and call
 * through some function pointer to imaginary_exception, which
 * will parse a register value or memory message queue to
|
||||
* deliver things like interprocessor interrupts. On return
|
||||
* from that function, jump to the global ret_from_irq code
|
||||
* to invoke the scheduler and return as appropriate.
|
||||
*/
|
||||
|
||||
#define PT_PADSLOT4 (PT_R0-8)
|
||||
#define PT_PADSLOT5 (PT_R0-4)
|
||||
|
||||
.text
|
||||
.align 5
|
||||
FEXPORT(__smtc_ipi_vector)
|
||||
.set noat
|
||||
/* Disable thread scheduling to make Status update atomic */
|
||||
DMT 27 # dmt k1
|
||||
ehb
|
||||
/* Set EXL */
|
||||
mfc0 k0,CP0_STATUS
|
||||
ori k0,k0,ST0_EXL
|
||||
mtc0 k0,CP0_STATUS
|
||||
ehb
|
||||
/* Thread scheduling now inhibited by EXL. Restore TE state. */
|
||||
andi k1,k1,VPECONTROL_TE
|
||||
beqz k1,1f
|
||||
emt
|
||||
1:
|
||||
/*
|
||||
* The IPI sender has put some information on the anticipated
|
||||
* kernel stack frame. If we were in user mode, this will be
|
||||
* built above the saved kernel SP. If we were already in the
|
||||
* kernel, it will be built above the current CPU SP.
|
||||
*
|
||||
* Were we in kernel mode, as indicated by CU0?
|
||||
*/
|
||||
sll k1,k0,3
|
||||
.set noreorder
|
||||
bltz k1,2f
|
||||
move k1,sp
|
||||
.set reorder
|
||||
/*
|
||||
* If previously in user mode, set CU0 and use kernel stack.
|
||||
*/
|
||||
li k1,ST0_CU0
|
||||
or k1,k1,k0
|
||||
mtc0 k1,CP0_STATUS
|
||||
ehb
|
||||
get_saved_sp
|
||||
/* Interrupting TC will have pre-set values in slots in the new frame */
|
||||
2: subu k1,k1,PT_SIZE
|
||||
/* Load TCStatus Value */
|
||||
lw k0,PT_TCSTATUS(k1)
|
||||
/* Write it to TCStatus to restore CU/KSU/IXMT state */
|
||||
mtc0 k0,$2,1
|
||||
ehb
|
||||
lw k0,PT_EPC(k1)
|
||||
mtc0 k0,CP0_EPC
|
||||
/* Save all will redundantly recompute the SP, but use it for now */
|
||||
SAVE_ALL
|
||||
CLI
|
||||
move a0,sp
|
||||
/* Function to be invoked passed stack pad slot 5 */
|
||||
lw t0,PT_PADSLOT5(sp)
|
||||
/* Argument from sender passed in stack pad slot 4 */
|
||||
lw a1,PT_PADSLOT4(sp)
|
||||
jalr t0
|
||||
nop
|
||||
j ret_from_irq
|
||||
nop
|
||||
|
||||
/*
|
||||
* Called from idle loop to provoke processing of queued IPIs
|
||||
* First IPI message in queue passed as argument.
|
||||
*/
|
||||
|
||||
LEAF(self_ipi)
|
||||
/* Before anything else, block interrupts */
|
||||
mfc0 t0,CP0_TCSTATUS
|
||||
ori t1,t0,TCSTATUS_IXMT
|
||||
mtc0 t1,CP0_TCSTATUS
|
||||
ehb
|
||||
/* We know we're in kernel mode, so prepare stack frame */
|
||||
subu t1,sp,PT_SIZE
|
||||
sw ra,PT_EPC(t1)
|
||||
sw a0,PT_PADSLOT4(t1)
|
||||
la t2,ipi_decode
|
||||
sw t2,PT_PADSLOT5(t1)
|
||||
/* Save pre-disable value of TCStatus */
|
||||
sw t0,PT_TCSTATUS(t1)
|
||||
j __smtc_ipi_vector
|
||||
nop
|
||||
END(self_ipi)
|
|
@ -0,0 +1,93 @@
|
|||
/*
|
||||
* /proc hooks for SMTC kernel
|
||||
* Copyright (C) 2005 Mips Technologies, Inc
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/interrupt.h>
|
||||
|
||||
#include <asm/cpu.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/atomic.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/hardirq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/smp.h>
|
||||
#include <asm/mipsregs.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <linux/proc_fs.h>
|
||||
|
||||
#include <asm/smtc_proc.h>
|
||||
|
||||
/*
|
||||
* /proc diagnostic and statistics hooks
|
||||
*/
|
||||
|
||||
/*
|
||||
* Statistics gathered
|
||||
*/
|
||||
unsigned long selfipis[NR_CPUS];
|
||||
|
||||
struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
|
||||
|
||||
static struct proc_dir_entry *smtc_stats;
|
||||
|
||||
atomic_t smtc_fpu_recoveries;
|
||||
|
||||
static int proc_read_smtc(char *page, char **start, off_t off,
|
||||
int count, int *eof, void *data)
|
||||
{
|
||||
int totalen = 0;
|
||||
int len;
|
||||
int i;
|
||||
extern unsigned long ebase;
|
||||
|
||||
len = sprintf(page, "SMTC Status Word: 0x%08x\n", smtc_status);
|
||||
totalen += len;
|
||||
page += len;
|
||||
len = sprintf(page, "Config7: 0x%08x\n", read_c0_config7());
|
||||
totalen += len;
|
||||
page += len;
|
||||
len = sprintf(page, "EBASE: 0x%08lx\n", ebase);
|
||||
totalen += len;
|
||||
page += len;
|
||||
len = sprintf(page, "Counter Interrupts taken per CPU (TC)\n");
|
||||
totalen += len;
|
||||
page += len;
|
||||
for (i=0; i < NR_CPUS; i++) {
|
||||
len = sprintf(page, "%d: %ld\n", i, smtc_cpu_stats[i].timerints);
|
||||
totalen += len;
|
||||
page += len;
|
||||
}
|
||||
len = sprintf(page, "Self-IPIs by CPU:\n");
|
||||
totalen += len;
|
||||
page += len;
|
||||
for(i = 0; i < NR_CPUS; i++) {
|
||||
len = sprintf(page, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
|
||||
totalen += len;
|
||||
page += len;
|
||||
}
|
||||
len = sprintf(page, "%d Recoveries of \"stolen\" FPU\n",
|
||||
atomic_read(&smtc_fpu_recoveries));
|
||||
totalen += len;
|
||||
page += len;
|
||||
|
||||
return totalen;
|
||||
}
|
||||
|
||||
void init_smtc_stats(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i=0; i<NR_CPUS; i++) {
|
||||
smtc_cpu_stats[i].timerints = 0;
|
||||
smtc_cpu_stats[i].selfipis = 0;
|
||||
}
|
||||
|
||||
atomic_set(&smtc_fpu_recoveries, 0);
|
||||
|
||||
smtc_stats = create_proc_read_entry("smtc", 0444, NULL,
|
||||
proc_read_smtc, NULL);
|
||||
}
|
|
@ -116,8 +116,7 @@ static void c0_timer_ack(void)
|
|||
write_c0_compare(expirelo);
|
||||
|
||||
/* Check to see if we have missed any timer interrupts. */
|
||||
count = read_c0_count();
|
||||
if ((count - expirelo) < 0x7fffffff) {
|
||||
while (((count = read_c0_count()) - expirelo) < 0x7fffffff) {
|
||||
/* missed_timer_count++; */
|
||||
expirelo = count + cycles_per_jiffy;
|
||||
write_c0_compare(expirelo);
|
||||
|
|