Merge branch 'master' into for_paulus

Kumar Gala 2007-02-12 21:17:37 -06:00
commit 06d8bf64ba
1078 changed files with 57101 additions and 26524 deletions

View File

@ -30,6 +30,7 @@ are not a good substitute for a solid C education and/or years of
experience, the following books are good for, if anything, reference:
- "The C Programming Language" by Kernighan and Ritchie [Prentice Hall]
- "Practical C Programming" by Steve Oualline [O'Reilly]
- "C: A Reference Manual" by Harbison and Steele [Prentice Hall]
The kernel is written using GNU C and the GNU toolchain. While it
adheres to the ISO C89 standard, it uses a number of extensions that are

View File

@ -274,6 +274,7 @@ Who: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
---------------------------
<<<<<<< test:Documentation/feature-removal-schedule.txt
What: ACPI hotkey driver (CONFIG_ACPI_HOTKEY)
When: 2.6.21
Why: hotkey.c was an attempt to consolidate multiple drivers that use
@ -306,11 +307,18 @@ Why: The ACPI namespace is effectively the symbol list for
the BIOS can be extracted and disassembled with acpidump
and iasl as documented in the pmtools package here:
http://ftp.kernel.org/pub/linux/kernel/people/lenb/acpi/utils
Who: Len Brown <len.brown@intel.com>
---------------------------
What: ACPI procfs interface
When: July 2007
Why: After ACPI sysfs conversion, ACPI attributes will be duplicated
in sysfs and the ACPI procfs interface should be removed.
Who: Zhang Rui <rui.zhang@intel.com>
---------------------------
What: /proc/acpi/button
When: August 2007
Why: /proc/acpi/button has been replaced by events to the input layer
@ -325,3 +333,10 @@ Why: Unmaintained for years, superseded by JFFS2 for years.
Who: Jeff Garzik <jeff@garzik.org>
---------------------------
What: sk98lin network driver
When: July 2007
Why: The in-kernel version of the driver is unmaintained. The sk98lin driver
has been replaced by the skge driver.
Who: Stephen Hemminger <shemminger@osdl.org>

View File

@ -480,7 +480,7 @@ r2 argument 0 / return value 0 call-clobbered
r3 argument 1 / return value 1 (if long long) call-clobbered
r4 argument 2 call-clobbered
r5 argument 3 call-clobbered
r6 argument 5 saved
r6 argument 4 saved
r7 pointer-to arguments 5 to ... saved
r8 this & that saved
r9 this & that saved

View File

@ -213,15 +213,16 @@ C:* #Ifs=dd Cfg#=dd Atr=xx MPwr=dddmA
Interface descriptor info (can be multiple per Config):
I: If#=dd Alt=dd #EPs=dd Cls=xx(sssss) Sub=xx Prot=xx Driver=ssss
| | | | | | | |__Driver name
| | | | | | | or "(none)"
| | | | | | |__InterfaceProtocol
| | | | | |__InterfaceSubClass
| | | | |__InterfaceClass
| | | |__NumberOfEndpoints
| | |__AlternateSettingNumber
| |__InterfaceNumber
I:* If#=dd Alt=dd #EPs=dd Cls=xx(sssss) Sub=xx Prot=xx Driver=ssss
| | | | | | | | |__Driver name
| | | | | | | | or "(none)"
| | | | | | | |__InterfaceProtocol
| | | | | | |__InterfaceSubClass
| | | | | |__InterfaceClass
| | | | |__NumberOfEndpoints
| | | |__AlternateSettingNumber
| | |__InterfaceNumber
| |__ "*" indicates the active altsetting (others are " ")
|__Interface info tag
A given interface may have one or more "alternate" settings.
@ -277,7 +278,7 @@ of the USB devices on a system's root hub. (See more below
on how to do this.)
The Interface lines can be used to determine what driver is
being used for each device.
being used for each device, and which altsetting it activated.
The Configuration lines could be used to list maximum power
(in milliamps) that a system's USB devices are using.

View File

@ -77,7 +77,7 @@ that the file size is not excessive for your favourite editor.
The '1t' type data consists of a stream of events, such as URB submission,
URB callback, submission error. Every event is a text line, which consists
of whitespace separated words. The number of position of words may depend
of whitespace separated words. The number or position of words may depend
on the event type, but there is a set of words, common for all types.
Here is the list of words, from left to right:
@ -170,4 +170,152 @@ dd65f0e8 4128379808 C Bo:005:02 0 31 >
* Raw binary format and API
TBD
The overall architecture of the API is about the same as the one above,
only the events are delivered in binary format. Each event is sent in
the following structure (its name is made up, so that we can refer to it):
struct usbmon_packet {
u64 id; /* 0: URB ID - from submission to callback */
unsigned char type; /* 8: Same as text; extensible. */
unsigned char xfer_type; /* ISO (0), Intr, Control, Bulk (3) */
unsigned char epnum; /* Endpoint number and transfer direction */
unsigned char devnum; /* Device address */
u16 busnum; /* 12: Bus number */
char flag_setup; /* 14: Same as text */
char flag_data; /* 15: Same as text; Binary zero is OK. */
s64 ts_sec; /* 16: gettimeofday */
s32 ts_usec; /* 24: gettimeofday */
int status; /* 28: */
unsigned int length; /* 32: Length of data (submitted or actual) */
unsigned int len_cap; /* 36: Delivered length */
unsigned char setup[8]; /* 40: Only for Control 'S' */
}; /* 48 bytes total */
These events can be received from a character device by reading with read(2),
with an ioctl(2), or by accessing the buffer with mmap.
The character device is usually called /dev/usbmonN, where N is the USB bus
number. Number zero (/dev/usbmon0) is special and means "all buses";
however, this feature is not implemented yet. Note that the specific naming
policy is set by your Linux distribution.
If you create /dev/usbmon0 by hand, make sure that it is owned by root
and has mode 0600. Otherwise, unprivileged users will be able to snoop
keyboard traffic.
The following ioctl calls are available, with MON_IOC_MAGIC 0x92:
MON_IOCQ_URB_LEN, defined as _IO(MON_IOC_MAGIC, 1)
This call returns the length of data in the next event. Note that the majority
of events contain no data, so if this call returns zero, it does not mean that
no events are available.
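For illustration (this snippet is not part of the original text), a minimal
user-space sketch that opens a monitor device and queries the length of the
next event might look as follows. The macro values simply restate the
definitions given in this document, and the device path is the one described
above:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#define MON_IOC_MAGIC     0x92
#define MON_IOCQ_URB_LEN  _IO(MON_IOC_MAGIC, 1)

int main(void)
{
        /* "/dev/usbmon0" means "all buses" but is not implemented yet;
         * substitute the number of the bus you actually want to watch. */
        int fd = open("/dev/usbmon0", O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* Length of data in the next event; zero does not imply that the
         * queue is empty, since most events carry no data. */
        int len = ioctl(fd, MON_IOCQ_URB_LEN, 0);
        if (len < 0)
                perror("MON_IOCQ_URB_LEN");
        else
                printf("next event carries %d data byte(s)\n", len);

        close(fd);
        return 0;
}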
MON_IOCG_STATS, defined as _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)
The argument is a pointer to the following structure:
struct mon_bin_stats {
u32 queued;
u32 dropped;
};
The member "queued" refers to the number of events currently queued in the
buffer (and not to the number of events processed since the last reset).
The member "dropped" is the number of events lost since the last call
to MON_IOCG_STATS.
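Continuing the sketch above (an open fd on the monitor device, MON_IOC_MAGIC
as defined there), a hypothetical dropped-event check could be written like
this; the structure is a user-space restatement of the layout shown above:

#include <stdint.h>

struct mon_bin_stats {
        uint32_t queued;
        uint32_t dropped;
};

#define MON_IOCG_STATS  _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)

struct mon_bin_stats st;
if (ioctl(fd, MON_IOCG_STATS, &st) == 0 && st.dropped)
        fprintf(stderr, "lost %u events since the last MON_IOCG_STATS\n",
                st.dropped);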
MON_IOCT_RING_SIZE, defined as _IO(MON_IOC_MAGIC, 4)
This call sets the buffer size. The argument is the size in bytes.
The size may be rounded down to the next chunk (or page). If the requested
size is out of [unspecified] bounds for this kernel, the call fails with
-EINVAL.
MON_IOCQ_RING_SIZE, defined as _IO(MON_IOC_MAGIC, 5)
This call returns the current size of the buffer in bytes.
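Again assuming the fd from the sketch above, growing the buffer before
capturing heavy traffic might look like:

#define MON_IOCT_RING_SIZE  _IO(MON_IOC_MAGIC, 4)
#define MON_IOCQ_RING_SIZE  _IO(MON_IOC_MAGIC, 5)

/* Ask for about 1 MB; the kernel may round this down. */
if (ioctl(fd, MON_IOCT_RING_SIZE, 1024 * 1024) < 0)
        perror("MON_IOCT_RING_SIZE");

/* The size actually in effect. */
int ring_size = ioctl(fd, MON_IOCQ_RING_SIZE, 0);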
MON_IOCX_GET, defined as _IOW(MON_IOC_MAGIC, 6, struct mon_get_arg)
This call waits for events to arrive if none were in the kernel buffer,
then returns the first event. Its argument is a pointer to the following
structure:
struct mon_get_arg {
struct usbmon_packet *hdr;
void *data;
size_t alloc; /* Length of data (can be zero) */
};
Before the call, hdr, data, and alloc should be filled in. Upon return, the
area pointed to by hdr contains the next event structure, and the data buffer
contains the data, if any. The event is removed from the kernel buffer.
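A sketch of fetching a single event this way, reusing the fd from the earlier
snippets and assuming that usbmon_packet and mon_get_arg have been declared in
user space with equivalent fixed-width types, might be:

#define MON_IOCX_GET  _IOW(MON_IOC_MAGIC, 6, struct mon_get_arg)

struct usbmon_packet hdr;       /* layout as shown earlier */
unsigned char data[1024];
struct mon_get_arg get = {
        .hdr   = &hdr,
        .data  = data,
        .alloc = sizeof(data),  /* data beyond this size is not copied */
};

if (ioctl(fd, MON_IOCX_GET, &get) == 0)
        printf("event '%c' on bus %u, captured %u of %u data bytes\n",
               hdr.type, (unsigned)hdr.busnum, hdr.len_cap, hdr.length);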
MON_IOCX_MFETCH, defined as _IOWR(MON_IOC_MAGIC, 7, struct mon_mfetch_arg)
This ioctl is primarily used when the application accesses the buffer
with mmap(2). Its argument is a pointer to the following structure:
struct mon_mfetch_arg {
uint32_t *offvec; /* Vector of events fetched */
uint32_t nfetch; /* Number of events to fetch (out: fetched) */
uint32_t nflush; /* Number of events to flush */
};
The ioctl operates in three stages.
First, it removes and discards up to nflush events from the kernel buffer.
The actual number of events discarded is returned in nflush.
Second, it waits for an event to be present in the buffer, unless the
pseudo-device was opened with O_NONBLOCK.
Third, it extracts up to nfetch offsets into the mmap buffer, and stores
them into offvec. The actual number of event offsets is stored into nfetch.
MON_IOCH_MFLUSH, defined as _IO(MON_IOC_MAGIC, 8)
This call removes a number of events from the kernel buffer. Its argument
is the number of events to remove. If the buffer contains fewer events
than requested, all events present are removed, and no error is reported.
This also works when no events are available.
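For completeness, a sketch of the call, under the same assumptions as the
snippets above:

#define MON_IOCH_MFLUSH  _IO(MON_IOC_MAGIC, 8)

/* Drop up to 16 events; succeeds even if fewer (or none) are queued. */
ioctl(fd, MON_IOCH_MFLUSH, 16);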
FIONBIO
The ioctl FIONBIO may be implemented in the future, if there's a need.
In addition to ioctl(2) and read(2), the special file of the binary API can
be polled with select(2) and poll(2); lseek(2), however, does not work.
* Memory-mapped access of the kernel buffer for the binary API
The basic idea is simple:
To prepare, map the buffer by getting the current size, then using mmap(2).
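Under the same assumptions as the earlier snippets (open fd, ioctl definitions
as quoted above), a sketch of this preparation step might be:

#include <sys/mman.h>

int ring_size = ioctl(fd, MON_IOCQ_RING_SIZE, 0);
unsigned char *mmap_area = mmap(NULL, ring_size, PROT_READ, MAP_SHARED, fd, 0);
if (mmap_area == MAP_FAILED) {
        perror("mmap");
        /* fall back to MON_IOCX_GET or to the '1t' text interface */
}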
Then, execute a loop similar to the one written in pseudo-code below:
struct mon_mfetch_arg fetch;
struct usbmon_packet *hdr;
int nflush = 0;
for (;;) {
fetch.offvec = vec; // Has N 32-bit words
fetch.nfetch = N; // Or less than N
fetch.nflush = nflush;
ioctl(fd, MON_IOCX_MFETCH, &fetch); // Process errors, too
nflush = fetch.nfetch; // This many packets to flush when done
for (i = 0; i < nflush; i++) {
hdr = (struct usbmon_packet *) &mmap_area[vec[i]];
if (hdr->type == '@') // Filler packet
continue;
caddr_t data = &mmap_area[vec[i]] + 64;
process_packet(hdr, data);
}
}
Thus, the main idea is to execute only one ioctl per N events.
Although the buffer is circular, the returned headers and data do not cross
the end of the buffer, so the above pseudo-code does not need any gathering.

View File

@ -0,0 +1,34 @@
Video Output Switcher Control
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2006 luming.yu@intel.com
The output sysfs class driver provides an abstract video output layer that
can be used to hook platform-specific methods to enable/disable a video output
device through a common sysfs interface. For example, on my IBM ThinkPad T42
laptop, the ACPI video driver registers its output devices and a read/write
method for 'state' with the output sysfs class. The user interface under sysfs is:
linux:/sys/class/video_output # tree .
.
|-- CRT0
| |-- device -> ../../../devices/pci0000:00/0000:00:01.0
| |-- state
| |-- subsystem -> ../../../class/video_output
| `-- uevent
|-- DVI0
| |-- device -> ../../../devices/pci0000:00/0000:00:01.0
| |-- state
| |-- subsystem -> ../../../class/video_output
| `-- uevent
|-- LCD0
| |-- device -> ../../../devices/pci0000:00/0000:00:01.0
| |-- state
| |-- subsystem -> ../../../class/video_output
| `-- uevent
`-- TV0
|-- device -> ../../../devices/pci0000:00/0000:00:01.0
|-- state
|-- subsystem -> ../../../class/video_output
`-- uevent
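As a hypothetical illustration (the path and the meaning of the value are
assumptions based on the listing above, not a statement of the driver's
contract), the 'state' attribute can be read like any other sysfs file:

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/class/video_output/LCD0/state", "r");
        int state;

        if (!f || fscanf(f, "%d", &state) != 1)
                return 1;
        printf("LCD0 state: %d\n", state);
        fclose(f);
        return 0;
}

Writing a value to the same attribute invokes the state-setting method that
the platform driver registered with the class.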

View File

@ -584,12 +584,30 @@ W: http://sourceforge.net/projects/acpi4asus
W: http://xf.iksaif.net/acpi4asus
S: Maintained
ASUS LAPTOP EXTRAS DRIVER
P: Corentin Chary
M: corentincj@iksaif.net
L: acpi4asus-user@lists.sourceforge.net
W: http://sourceforge.net/projects/acpi4asus
W: http://xf.iksaif.net/acpi4asus
S: Maintained
ATA OVER ETHERNET DRIVER
P: Ed L. Cashin
M: ecashin@coraid.com
W: http://www.coraid.com/support/linux
S: Supported
ATL1 ETHERNET DRIVER
P: Jay Cliburn
M: jcliburn@gmail.com
P: Chris Snook
M: csnook@redhat.com
L: atl1-devel@lists.sourceforge.net
W: http://sourceforge.net/projects/atl1
W: http://atl1.sourceforge.net
S: Maintained
ATM
P: Chas Williams
M: chas@cmf.nrl.navy.mil
@ -2477,6 +2495,12 @@ L: orinoco-devel@lists.sourceforge.net
W: http://www.nongnu.org/orinoco/
S: Maintained
PA SEMI ETHERNET DRIVER
P: Olof Johansson
M: olof@lixom.net
L: netdev@vger.kernel.org
S: Maintained
PARALLEL PORT SUPPORT
P: Phil Blundell
M: philb@gnu.org
@ -2646,7 +2670,7 @@ S: Supported
PRISM54 WIRELESS DRIVER
P: Prism54 Development Team
M: prism54-private@prism54.org
M: developers@islsm.org
L: netdev@vger.kernel.org
W: http://prism54.org
S: Maintained
@ -2791,7 +2815,7 @@ M: schwidefsky@de.ibm.com
P: Heiko Carstens
M: heiko.carstens@de.ibm.com
M: linux390@de.ibm.com
L: linux-390@vm.marist.edu
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
@ -2799,7 +2823,7 @@ S390 NETWORK DRIVERS
P: Frank Pavlic
M: fpavlic@de.ibm.com
M: linux390@de.ibm.com
L: linux-390@vm.marist.edu
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
@ -2807,7 +2831,7 @@ S390 ZFCP DRIVER
P: Swen Schillig
M: swen@vnet.ibm.com
M: linux390@de.ibm.com
L: linux-390@vm.marist.edu
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
@ -3647,7 +3671,7 @@ S: Maintained
W83L51xD SD/MMC CARD INTERFACE DRIVER
P: Pierre Ossman
M: drzeus-wbsd@drzeus.cx
L: wbsd-devel@list.drzeus.cx
L: linux-kernel@vger.kernel.org
W: http://projects.drzeus.cx/wbsd
S: Maintained

View File

@ -776,7 +776,7 @@ $(vmlinux-dirs): prepare scripts
# $(EXTRAVERSION) eg, -rc6
# $(localver-full)
# $(localver)
# localversion* (all localversion* files)
# localversion* (files without backups, containing '~')
# $(CONFIG_LOCALVERSION) (from kernel config setting)
# $(localver-auto) (only if CONFIG_LOCALVERSION_AUTO is set)
# ./scripts/setlocalversion (SCM tag, if one exists)
@ -787,16 +787,11 @@ $(vmlinux-dirs): prepare scripts
# moment, only git is supported but other SCMs can edit the script
# scripts/setlocalversion and add the appropriate checks as needed.
nullstring :=
space := $(nullstring) # end of line
pattern = ".*/localversion[^~]*"
string = $(shell cat /dev/null \
`find $(objtree) $(srctree) -maxdepth 1 -regex $(pattern) | sort`)
___localver = $(objtree)/localversion* $(srctree)/localversion*
__localver = $(sort $(wildcard $(___localver)))
# skip backup files (containing '~')
_localver = $(foreach f, $(__localver), $(if $(findstring ~, $(f)),,$(f)))
localver = $(subst $(space),, \
$(shell cat /dev/null $(_localver)) \
localver = $(subst $(space),, $(string) \
$(patsubst "%",%,$(CONFIG_LOCALVERSION)))
# If CONFIG_LOCALVERSION_AUTO is set scripts/setlocalversion is called

View File

@ -575,3 +575,7 @@ void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
/* FIXME: Some boxes have multiple ISA bridges! */
struct pci_dev *isa_bridge;
EXPORT_SYMBOL(isa_bridge);

View File

@ -466,7 +466,8 @@ CONFIG_FW_LOADER=y
#
# Plug and Play support
#
# CONFIG_PNP is not set
CONFIG_PNP=y
CONFIG_PNPACPI=y
#
# Block devices

View File

@ -66,7 +66,7 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return
#define BAD_MADT_ENTRY(entry, end) ( \
(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
((acpi_table_entry_header *)entry)->length < sizeof(*entry))
((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
#define PREFIX "ACPI: "
@ -79,7 +79,7 @@ int acpi_ioapic;
int acpi_strict;
EXPORT_SYMBOL(acpi_strict);
acpi_interrupt_flags acpi_sci_flags __initdata;
u8 acpi_sci_flags __initdata;
int acpi_sci_override_gsi __initdata;
int acpi_skip_timer_override __initdata;
int acpi_use_timer_override __initdata;
@ -92,11 +92,6 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
#warning ACPI uses CMPXCHG, i486 and later hardware
#endif
#define MAX_MADT_ENTRIES 256
u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
{[0 ... MAX_MADT_ENTRIES - 1] = 0xff };
EXPORT_SYMBOL(x86_acpiid_to_apicid);
/* --------------------------------------------------------------------------
Boot-time Configuration
-------------------------------------------------------------------------- */
@ -166,30 +161,26 @@ char *__acpi_map_table(unsigned long phys, unsigned long size)
#ifdef CONFIG_PCI_MMCONFIG
/* The physical address of the MMCONFIG aperture. Set from ACPI tables. */
struct acpi_table_mcfg_config *pci_mmcfg_config;
struct acpi_mcfg_allocation *pci_mmcfg_config;
int pci_mmcfg_config_num;
int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
int __init acpi_parse_mcfg(struct acpi_table_header *header)
{
struct acpi_table_mcfg *mcfg;
unsigned long i;
int config_size;
if (!phys_addr || !size)
if (!header)
return -EINVAL;
mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size);
if (!mcfg) {
printk(KERN_WARNING PREFIX "Unable to map MCFG\n");
return -ENODEV;
}
mcfg = (struct acpi_table_mcfg *)header;
/* how many config structures do we have */
pci_mmcfg_config_num = 0;
i = size - sizeof(struct acpi_table_mcfg);
while (i >= sizeof(struct acpi_table_mcfg_config)) {
i = header->length - sizeof(struct acpi_table_mcfg);
while (i >= sizeof(struct acpi_mcfg_allocation)) {
++pci_mmcfg_config_num;
i -= sizeof(struct acpi_table_mcfg_config);
i -= sizeof(struct acpi_mcfg_allocation);
};
if (pci_mmcfg_config_num == 0) {
printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
@ -204,9 +195,9 @@ int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
return -ENOMEM;
}
memcpy(pci_mmcfg_config, &mcfg->config, config_size);
memcpy(pci_mmcfg_config, &mcfg[1], config_size);
for (i = 0; i < pci_mmcfg_config_num; ++i) {
if (mcfg->config[i].base_reserved) {
if (pci_mmcfg_config[i].address > 0xFFFFFFFF) {
printk(KERN_ERR PREFIX
"MMCONFIG not in low 4GB of memory\n");
kfree(pci_mmcfg_config);
@ -220,24 +211,24 @@ int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
#endif /* CONFIG_PCI_MMCONFIG */
#ifdef CONFIG_X86_LOCAL_APIC
static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
static int __init acpi_parse_madt(struct acpi_table_header *table)
{
struct acpi_table_madt *madt = NULL;
if (!phys_addr || !size || !cpu_has_apic)
if (!cpu_has_apic)
return -EINVAL;
madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
madt = (struct acpi_table_madt *)table;
if (!madt) {
printk(KERN_WARNING PREFIX "Unable to map MADT\n");
return -ENODEV;
}
if (madt->lapic_address) {
acpi_lapic_addr = (u64) madt->lapic_address;
if (madt->address) {
acpi_lapic_addr = (u64) madt->address;
printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
madt->lapic_address);
madt->address);
}
acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
@ -246,21 +237,17 @@ static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
}
static int __init
acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
{
struct acpi_table_lapic *processor = NULL;
struct acpi_madt_local_apic *processor = NULL;
processor = (struct acpi_table_lapic *)header;
processor = (struct acpi_madt_local_apic *)header;
if (BAD_MADT_ENTRY(processor, end))
return -EINVAL;
acpi_table_print_madt_entry(header);
/* Record local apic id only when enabled */
if (processor->flags.enabled)
x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
/*
* We need to register disabled CPU as well to permit
* counting disabled CPUs. This allows us to size
@ -269,18 +256,18 @@ acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
* when we use CPU hotplug.
*/
mp_register_lapic(processor->id, /* APIC ID */
processor->flags.enabled); /* Enabled? */
processor->lapic_flags & ACPI_MADT_ENABLED); /* Enabled? */
return 0;
}
static int __init
acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
const unsigned long end)
{
struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL;
lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr *)header;
lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header;
if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
return -EINVAL;
@ -291,11 +278,11 @@ acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
}
static int __init
acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
{
struct acpi_table_lapic_nmi *lapic_nmi = NULL;
struct acpi_madt_local_apic_nmi *lapic_nmi = NULL;
lapic_nmi = (struct acpi_table_lapic_nmi *)header;
lapic_nmi = (struct acpi_madt_local_apic_nmi *)header;
if (BAD_MADT_ENTRY(lapic_nmi, end))
return -EINVAL;
@ -313,11 +300,11 @@ acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
#ifdef CONFIG_X86_IO_APIC
static int __init
acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
{
struct acpi_table_ioapic *ioapic = NULL;
struct acpi_madt_io_apic *ioapic = NULL;
ioapic = (struct acpi_table_ioapic *)header;
ioapic = (struct acpi_madt_io_apic *)header;
if (BAD_MADT_ENTRY(ioapic, end))
return -EINVAL;
@ -342,11 +329,11 @@ static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
polarity = 3;
/* Command-line over-ride via acpi_sci= */
if (acpi_sci_flags.trigger)
trigger = acpi_sci_flags.trigger;
if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)
trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2;
if (acpi_sci_flags.polarity)
polarity = acpi_sci_flags.polarity;
if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK)
polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
/*
* mp_config_acpi_legacy_irqs() already setup IRQs < 16
@ -357,51 +344,52 @@ static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
/*
* stash over-ride to indicate we've been here
* and for later update of acpi_fadt
* and for later update of acpi_gbl_FADT
*/
acpi_sci_override_gsi = gsi;
return;
}
static int __init
acpi_parse_int_src_ovr(acpi_table_entry_header * header,
acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
const unsigned long end)
{
struct acpi_table_int_src_ovr *intsrc = NULL;
struct acpi_madt_interrupt_override *intsrc = NULL;
intsrc = (struct acpi_table_int_src_ovr *)header;
intsrc = (struct acpi_madt_interrupt_override *)header;
if (BAD_MADT_ENTRY(intsrc, end))
return -EINVAL;
acpi_table_print_madt_entry(header);
if (intsrc->bus_irq == acpi_fadt.sci_int) {
if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
acpi_sci_ioapic_setup(intsrc->global_irq,
intsrc->flags.polarity,
intsrc->flags.trigger);
intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
(intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
return 0;
}
if (acpi_skip_timer_override &&
intsrc->bus_irq == 0 && intsrc->global_irq == 2) {
intsrc->source_irq == 0 && intsrc->global_irq == 2) {
printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
return 0;
}
mp_override_legacy_irq(intsrc->bus_irq,
intsrc->flags.polarity,
intsrc->flags.trigger, intsrc->global_irq);
mp_override_legacy_irq(intsrc->source_irq,
intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
(intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
intsrc->global_irq);
return 0;
}
static int __init
acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
{
struct acpi_table_nmi_src *nmi_src = NULL;
struct acpi_madt_nmi_source *nmi_src = NULL;
nmi_src = (struct acpi_table_nmi_src *)header;
nmi_src = (struct acpi_madt_nmi_source *)header;
if (BAD_MADT_ENTRY(nmi_src, end))
return -EINVAL;
@ -511,7 +499,7 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
struct acpi_table_lapic *lapic;
struct acpi_madt_local_apic *lapic;
cpumask_t tmp_map, new_map;
u8 physid;
int cpu;
@ -529,10 +517,10 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
return -EINVAL;
}
lapic = (struct acpi_table_lapic *)obj->buffer.pointer;
lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;
if ((lapic->header.type != ACPI_MADT_LAPIC) ||
(!lapic->flags.enabled)) {
if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
!(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
kfree(buffer.pointer);
return -EINVAL;
}
@ -544,7 +532,7 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
buffer.pointer = NULL;
tmp_map = cpu_present_map;
mp_register_lapic(physid, lapic->flags.enabled);
mp_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
/*
* If mp_register_lapic successfully generates a new logical cpu
@ -566,14 +554,6 @@ EXPORT_SYMBOL(acpi_map_lsapic);
int acpi_unmap_lsapic(int cpu)
{
int i;
for_each_possible_cpu(i) {
if (x86_acpiid_to_apicid[i] == x86_cpu_to_apicid[cpu]) {
x86_acpiid_to_apicid[i] = -1;
break;
}
}
x86_cpu_to_apicid[cpu] = -1;
cpu_clear(cpu, cpu_present_map);
num_processors--;
@ -619,42 +599,36 @@ acpi_scan_rsdp(unsigned long start, unsigned long length)
return 0;
}
static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size)
static int __init acpi_parse_sbf(struct acpi_table_header *table)
{
struct acpi_table_sbf *sb;
struct acpi_table_boot *sb;
if (!phys_addr || !size)
return -EINVAL;
sb = (struct acpi_table_sbf *)__acpi_map_table(phys_addr, size);
sb = (struct acpi_table_boot *)table;
if (!sb) {
printk(KERN_WARNING PREFIX "Unable to map SBF\n");
return -ENODEV;
}
sbf_port = sb->sbf_cmos; /* Save CMOS port */
sbf_port = sb->cmos_index; /* Save CMOS port */
return 0;
}
#ifdef CONFIG_HPET_TIMER
static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
static int __init acpi_parse_hpet(struct acpi_table_header *table)
{
struct acpi_table_hpet *hpet_tbl;
struct resource *hpet_res;
resource_size_t res_start;
if (!phys || !size)
return -EINVAL;
hpet_tbl = (struct acpi_table_hpet *)__acpi_map_table(phys, size);
hpet_tbl = (struct acpi_table_hpet *)table;
if (!hpet_tbl) {
printk(KERN_WARNING PREFIX "Unable to map HPET\n");
return -ENODEV;
}
if (hpet_tbl->addr.space_id != ACPI_SPACE_MEM) {
if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
printk(KERN_WARNING PREFIX "HPET timers must be located in "
"memory.\n");
return -1;
@ -667,13 +641,12 @@ static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
hpet_res->name = (void *)&hpet_res[1];
hpet_res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE,
"HPET %u", hpet_tbl->number);
"HPET %u", hpet_tbl->sequence);
hpet_res->end = (1 * 1024) - 1;
}
#ifdef CONFIG_X86_64
vxtime.hpet_address = hpet_tbl->addr.addrl |
((long)hpet_tbl->addr.addrh << 32);
vxtime.hpet_address = hpet_tbl->address.address;
printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
hpet_tbl->id, vxtime.hpet_address);
@ -683,7 +656,7 @@ static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
{
extern unsigned long hpet_address;
hpet_address = hpet_tbl->addr.addrl;
hpet_address = hpet_tbl->address.address;
printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
hpet_tbl->id, hpet_address);
@ -707,42 +680,28 @@ static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
extern u32 pmtmr_ioport;
#endif
static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
static int __init acpi_parse_fadt(struct acpi_table_header *table)
{
struct fadt_descriptor *fadt = NULL;
fadt = (struct fadt_descriptor *)__acpi_map_table(phys, size);
if (!fadt) {
printk(KERN_WARNING PREFIX "Unable to map FADT\n");
return 0;
}
/* initialize sci_int early for INT_SRC_OVR MADT parsing */
acpi_fadt.sci_int = fadt->sci_int;
/* initialize rev and apic_phys_dest_mode for x86_64 genapic */
acpi_fadt.revision = fadt->revision;
acpi_fadt.force_apic_physical_destination_mode =
fadt->force_apic_physical_destination_mode;
#ifdef CONFIG_X86_PM_TIMER
/* detect the location of the ACPI PM Timer */
if (fadt->revision >= FADT2_REVISION_ID) {
if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) {
/* FADT rev. 2 */
if (fadt->xpm_tmr_blk.address_space_id !=
if (acpi_gbl_FADT.xpm_timer_block.space_id !=
ACPI_ADR_SPACE_SYSTEM_IO)
return 0;
pmtmr_ioport = fadt->xpm_tmr_blk.address;
pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address;
/*
* "X" fields are optional extensions to the original V1.0
* fields, so we must selectively expand V1.0 fields if the
* corresponding X field is zero.
*/
if (!pmtmr_ioport)
pmtmr_ioport = fadt->V1_pm_tmr_blk;
pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
} else {
/* FADT rev. 1 */
pmtmr_ioport = fadt->V1_pm_tmr_blk;
pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
}
if (pmtmr_ioport)
printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
@ -790,7 +749,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
*/
count =
acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR,
acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
acpi_parse_lapic_addr_ovr, 0);
if (count < 0) {
printk(KERN_ERR PREFIX
@ -800,7 +759,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
mp_register_lapic_address(acpi_lapic_addr);
count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic,
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic,
MAX_APICS);
if (!count) {
printk(KERN_ERR PREFIX "No LAPIC entries present\n");
@ -813,7 +772,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
}
count =
acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0);
acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0);
if (count < 0) {
printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
/* TBD: Cleanup to allow fallback to MPS */
@ -855,7 +814,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
}
count =
acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic,
acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
MAX_IO_APICS);
if (!count) {
printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
@ -866,7 +825,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
}
count =
acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr,
acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr,
NR_IRQ_VECTORS);
if (count < 0) {
printk(KERN_ERR PREFIX
@ -880,13 +839,13 @@ static int __init acpi_parse_madt_ioapic_entries(void)
* pretend we got one so we can set the SCI flags.
*/
if (!acpi_sci_override_gsi)
acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0);
/* Fill in identity legacy mappings where no override */
mp_config_acpi_legacy_irqs();
count =
acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src,
acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src,
NR_IRQ_VECTORS);
if (count < 0) {
printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
@ -908,7 +867,7 @@ static void __init acpi_process_madt(void)
#ifdef CONFIG_X86_LOCAL_APIC
int count, error;
count = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
count = acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt);
if (count >= 1) {
/*
@ -1204,7 +1163,7 @@ int __init acpi_boot_table_init(void)
return error;
}
acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
/*
* blacklist may disable ACPI entirely
@ -1232,19 +1191,19 @@ int __init acpi_boot_init(void)
if (acpi_disabled && !acpi_ht)
return 1;
acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
/*
* set sci_int and PM timer address
*/
acpi_table_parse(ACPI_FADT, acpi_parse_fadt);
acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);
/*
* Process the Multiple APIC Description Table (MADT), if present
*/
acpi_process_madt();
acpi_table_parse(ACPI_HPET, acpi_parse_hpet);
acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
return 0;
}
@ -1315,13 +1274,17 @@ static int __init setup_acpi_sci(char *s)
if (!s)
return -EINVAL;
if (!strcmp(s, "edge"))
acpi_sci_flags.trigger = 1;
acpi_sci_flags = ACPI_MADT_TRIGGER_EDGE |
(acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
else if (!strcmp(s, "level"))
acpi_sci_flags.trigger = 3;
acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL |
(acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
else if (!strcmp(s, "high"))
acpi_sci_flags.polarity = 1;
acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH |
(acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
else if (!strcmp(s, "low"))
acpi_sci_flags.polarity = 3;
acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW |
(acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
else
return -EINVAL;
return 0;

View File

@ -16,7 +16,7 @@
static int nvidia_hpet_detected __initdata;
static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
static int __init nvidia_hpet_check(struct acpi_table_header *header)
{
nvidia_hpet_detected = 1;
return 0;
@ -30,7 +30,7 @@ static int __init check_bridge(int vendor, int device)
is enabled. */
if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
nvidia_hpet_detected = 0;
acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check);
if (nvidia_hpet_detected == 0) {
acpi_skip_timer_override = 1;
printk(KERN_INFO "Nvidia board "

View File

@ -190,7 +190,7 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
/* Invoke C3 */
inb(cx_address);
/* Dummy op - must do something useless after P_LVL3 read */
t = inl(acpi_fadt.xpm_tmr_blk.address);
t = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
/* Disable bus ratio bit */
local_irq_disable();
@ -250,8 +250,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
outb(3, 0x22);
} else if ((pr != NULL) && pr->flags.bm_control) {
/* Disable bus master arbitration */
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1,
ACPI_MTX_DO_NOT_LOCK);
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
}
switch (longhaul_version) {
@ -281,8 +280,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
case TYPE_POWERSAVER:
if (longhaul_flags & USE_ACPI_C3) {
/* Don't allow wakeup */
acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0,
ACPI_MTX_DO_NOT_LOCK);
acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
do_powersaver(cx->address, clock_ratio_index);
} else {
do_powersaver(0, clock_ratio_index);
@ -295,8 +293,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
outb(0, 0x22);
} else if ((pr != NULL) && pr->flags.bm_control) {
/* Enable bus master arbitration */
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0,
ACPI_MTX_DO_NOT_LOCK);
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
}
outb(pic2_mask,0xA1); /* restore mask */
outb(pic1_mask,0x21);

View File

@ -2606,25 +2606,32 @@ static struct irq_chip msi_chip = {
.retrigger = ioapic_retrigger_irq,
};
int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev)
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
struct msi_msg msg;
int ret;
int irq, ret;
irq = create_irq();
if (irq < 0)
return irq;
set_irq_msi(irq, desc);
ret = msi_compose_msg(dev, irq, &msg);
if (ret < 0)
if (ret < 0) {
destroy_irq(irq);
return ret;
}
write_msi_msg(irq, &msg);
set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq,
"edge");
return 0;
return irq;
}
void arch_teardown_msi_irq(unsigned int irq)
{
return;
destroy_irq(irq);
}
#endif /* CONFIG_PCI_MSI */

View File

@ -1057,7 +1057,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
static int gsi_to_irq[MAX_GSI_NUM];
/* Don't set up the ACPI SCI because it's already set up */
if (acpi_fadt.sci_int == gsi)
if (acpi_gbl_FADT.sci_interrupt == gsi)
return gsi;
ioapic = mp_find_ioapic(gsi);
@ -1114,7 +1114,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
/*
* Don't assign IRQ used by ACPI SCI
*/
if (gsi == acpi_fadt.sci_int)
if (gsi == acpi_gbl_FADT.sci_interrupt)
gsi = pci_irq++;
gsi_to_irq[irq] = gsi;
} else {

View File

@ -62,19 +62,19 @@ extern void * boot_ioremap(unsigned long, unsigned long);
/* Identify CPU proximity domains */
static void __init parse_cpu_affinity_structure(char *p)
{
struct acpi_table_processor_affinity *cpu_affinity =
(struct acpi_table_processor_affinity *) p;
struct acpi_srat_cpu_affinity *cpu_affinity =
(struct acpi_srat_cpu_affinity *) p;
if (!cpu_affinity->flags.enabled)
if ((cpu_affinity->flags & ACPI_SRAT_CPU_ENABLED) == 0)
return; /* empty entry */
/* mark this node as "seen" in node bitmap */
BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain);
BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain_lo);
apicid_to_pxm[cpu_affinity->apic_id] = cpu_affinity->proximity_domain;
apicid_to_pxm[cpu_affinity->apic_id] = cpu_affinity->proximity_domain_lo;
printk("CPU 0x%02X in proximity domain 0x%02X\n",
cpu_affinity->apic_id, cpu_affinity->proximity_domain);
cpu_affinity->apic_id, cpu_affinity->proximity_domain_lo);
}
/*
@ -87,25 +87,24 @@ static void __init parse_memory_affinity_structure (char *sratp)
unsigned long start_pfn, end_pfn;
u8 pxm;
struct node_memory_chunk_s *p, *q, *pend;
struct acpi_table_memory_affinity *memory_affinity =
(struct acpi_table_memory_affinity *) sratp;
struct acpi_srat_mem_affinity *memory_affinity =
(struct acpi_srat_mem_affinity *) sratp;
if (!memory_affinity->flags.enabled)
if ((memory_affinity->flags & ACPI_SRAT_MEM_ENABLED) == 0)
return; /* empty entry */
pxm = memory_affinity->proximity_domain & 0xff;
/* mark this node as "seen" in node bitmap */
BMAP_SET(pxm_bitmap, memory_affinity->proximity_domain);
BMAP_SET(pxm_bitmap, pxm);
/* calculate info for memory chunk structure */
paddr = memory_affinity->base_addr_hi;
paddr = (paddr << 32) | memory_affinity->base_addr_lo;
size = memory_affinity->length_hi;
size = (size << 32) | memory_affinity->length_lo;
paddr = memory_affinity->base_address;
size = memory_affinity->length;
start_pfn = paddr >> PAGE_SHIFT;
end_pfn = (paddr + size) >> PAGE_SHIFT;
pxm = memory_affinity->proximity_domain;
if (num_memory_chunks >= MAXCHUNKS) {
printk("Too many mem chunks in SRAT. Ignoring %lld MBytes at %llx\n",
@ -132,8 +131,8 @@ static void __init parse_memory_affinity_structure (char *sratp)
printk("Memory range 0x%lX to 0x%lX (type 0x%X) in proximity domain 0x%02X %s\n",
start_pfn, end_pfn,
memory_affinity->memory_type,
memory_affinity->proximity_domain,
(memory_affinity->flags.hot_pluggable ?
pxm,
((memory_affinity->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
"enabled and removable" : "enabled" ) );
}
@ -185,10 +184,10 @@ static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
num_memory_chunks = 0;
while (p < end) {
switch (*p) {
case ACPI_SRAT_PROCESSOR_AFFINITY:
case ACPI_SRAT_TYPE_CPU_AFFINITY:
parse_cpu_affinity_structure(p);
break;
case ACPI_SRAT_MEMORY_AFFINITY:
case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
parse_memory_affinity_structure(p);
break;
default:
@ -262,31 +261,30 @@ out_fail:
return 0;
}
struct acpi_static_rsdt {
struct acpi_table_rsdt table;
u32 padding[7]; /* Allow for 7 more table entries */
};
int __init get_memcfg_from_srat(void)
{
struct acpi_table_header *header = NULL;
struct acpi_table_rsdp *rsdp = NULL;
struct acpi_table_rsdt *rsdt = NULL;
struct acpi_pointer *rsdp_address = NULL;
struct acpi_table_rsdt saved_rsdt;
acpi_native_uint rsdp_address = 0;
struct acpi_static_rsdt saved_rsdt;
int tables = 0;
int i = 0;
if (ACPI_FAILURE(acpi_find_root_pointer(ACPI_PHYSICAL_ADDRESSING,
rsdp_address))) {
rsdp_address = acpi_find_rsdp();
if (!rsdp_address) {
printk("%s: System description tables not found\n",
__FUNCTION__);
goto out_err;
}
if (rsdp_address->pointer_type == ACPI_PHYSICAL_POINTER) {
printk("%s: assigning address to rsdp\n", __FUNCTION__);
rsdp = (struct acpi_table_rsdp *)
(u32)rsdp_address->pointer.physical;
} else {
printk("%s: rsdp_address is not a physical pointer\n", __FUNCTION__);
goto out_err;
}
rsdp = (struct acpi_table_rsdp *)(u32)rsdp_address;
if (!rsdp) {
printk("%s: Didn't find ACPI root!\n", __FUNCTION__);
goto out_err;
@ -295,13 +293,13 @@ int __init get_memcfg_from_srat(void)
printk(KERN_INFO "%.8s v%d [%.6s]\n", rsdp->signature, rsdp->revision,
rsdp->oem_id);
if (strncmp(rsdp->signature, RSDP_SIG,strlen(RSDP_SIG))) {
if (strncmp(rsdp->signature, ACPI_SIG_RSDP,strlen(ACPI_SIG_RSDP))) {
printk(KERN_WARNING "%s: RSDP table signature incorrect\n", __FUNCTION__);
goto out_err;
}
rsdt = (struct acpi_table_rsdt *)
boot_ioremap(rsdp->rsdt_address, sizeof(struct acpi_table_rsdt));
boot_ioremap(rsdp->rsdt_physical_address, sizeof(struct acpi_table_rsdt));
if (!rsdt) {
printk(KERN_WARNING
@ -310,9 +308,9 @@ int __init get_memcfg_from_srat(void)
goto out_err;
}
header = & rsdt->header;
header = &rsdt->header;
if (strncmp(header->signature, RSDT_SIG, strlen(RSDT_SIG))) {
if (strncmp(header->signature, ACPI_SIG_RSDT, strlen(ACPI_SIG_RSDT))) {
printk(KERN_WARNING "ACPI: RSDT signature incorrect\n");
goto out_err;
}
@ -330,9 +328,9 @@ int __init get_memcfg_from_srat(void)
memcpy(&saved_rsdt, rsdt, sizeof(saved_rsdt));
if (saved_rsdt.header.length > sizeof(saved_rsdt)) {
if (saved_rsdt.table.header.length > sizeof(saved_rsdt)) {
printk(KERN_WARNING "ACPI: Too big length in RSDT: %d\n",
saved_rsdt.header.length);
saved_rsdt.table.header.length);
goto out_err;
}
@ -341,15 +339,15 @@ int __init get_memcfg_from_srat(void)
for (i = 0; i < tables; i++) {
/* Map in header, then map in full table length. */
header = (struct acpi_table_header *)
boot_ioremap(saved_rsdt.entry[i], sizeof(struct acpi_table_header));
boot_ioremap(saved_rsdt.table.table_offset_entry[i], sizeof(struct acpi_table_header));
if (!header)
break;
header = (struct acpi_table_header *)
boot_ioremap(saved_rsdt.entry[i], header->length);
boot_ioremap(saved_rsdt.table.table_offset_entry[i], header->length);
if (!header)
break;
if (strncmp((char *) &header->signature, "SRAT", 4))
if (strncmp((char *) &header->signature, ACPI_SIG_SRAT, 4))
continue;
/* we've found the srat table. don't need to look at any more tables */

View File

@ -84,15 +84,6 @@ struct es7000_oem_table {
};
#ifdef CONFIG_ACPI
struct acpi_table_sdt {
unsigned long pa;
unsigned long count;
struct {
unsigned long pa;
enum acpi_table_id id;
unsigned long size;
} entry[50];
};
struct oem_table {
struct acpi_table_header Header;

View File

@ -160,53 +160,16 @@ parse_unisys_oem (char *oemptr)
int __init
find_unisys_acpi_oem_table(unsigned long *oem_addr)
{
struct acpi_table_rsdp *rsdp = NULL;
unsigned long rsdp_phys = 0;
struct acpi_table_header *header = NULL;
int i;
struct acpi_table_sdt sdt;
rsdp_phys = acpi_find_rsdp();
rsdp = __va(rsdp_phys);
if (rsdp->rsdt_address) {
struct acpi_table_rsdt *mapped_rsdt = NULL;
sdt.pa = rsdp->rsdt_address;
header = (struct acpi_table_header *)
__acpi_map_table(sdt.pa, sizeof(struct acpi_table_header));
if (!header)
return -ENODEV;
sdt.count = (header->length - sizeof(struct acpi_table_header)) >> 3;
mapped_rsdt = (struct acpi_table_rsdt *)
__acpi_map_table(sdt.pa, header->length);
if (!mapped_rsdt)
return -ENODEV;
header = &mapped_rsdt->header;
for (i = 0; i < sdt.count; i++)
sdt.entry[i].pa = (unsigned long) mapped_rsdt->entry[i];
};
for (i = 0; i < sdt.count; i++) {
header = (struct acpi_table_header *)
__acpi_map_table(sdt.entry[i].pa,
sizeof(struct acpi_table_header));
if (!header)
continue;
if (!strncmp((char *) &header->signature, "OEM1", 4)) {
if (!strncmp((char *) &header->oem_id, "UNISYS", 6)) {
void *addr;
struct oem_table *t;
acpi_table_print(header, sdt.entry[i].pa);
t = (struct oem_table *) __acpi_map_table(sdt.entry[i].pa, header->length);
addr = (void *) __acpi_map_table(t->OEMTableAddr, t->OEMTableSize);
*oem_addr = (unsigned long) addr;
int i = 0;
while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
struct oem_table *t = (struct oem_table *)header;
*oem_addr = (unsigned long)__acpi_map_table(t->OEMTableAddr,
t->OEMTableSize);
return 0;
}
}
}
return -1;
}
#endif

View File

@ -36,7 +36,7 @@ static DECLARE_BITMAP(fallback_slots, MAX_CHECK_BUS*32);
static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
{
int cfg_num = -1;
struct acpi_table_mcfg_config *cfg;
struct acpi_mcfg_allocation *cfg;
if (seg == 0 && bus < MAX_CHECK_BUS &&
test_bit(PCI_SLOT(devfn) + 32*bus, fallback_slots))
@ -48,11 +48,11 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
break;
}
cfg = &pci_mmcfg_config[cfg_num];
if (cfg->pci_segment_group_number != seg)
if (cfg->pci_segment != seg)
continue;
if ((cfg->start_bus_number <= bus) &&
(cfg->end_bus_number >= bus))
return cfg->base_address;
return cfg->address;
}
/* Handle more broken MCFG tables on Asus etc.
@ -60,9 +60,9 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
this applies to all busses. */
cfg = &pci_mmcfg_config[0];
if (pci_mmcfg_config_num == 1 &&
cfg->pci_segment_group_number == 0 &&
cfg->pci_segment == 0 &&
(cfg->start_bus_number | cfg->end_bus_number) == 0)
return cfg->base_address;
return cfg->address;
/* Fall back to type 0 */
return 0;
@ -199,19 +199,19 @@ void __init pci_mmcfg_init(int type)
if ((pci_probe & PCI_PROBE_MMCONF) == 0)
return;
acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
if ((pci_mmcfg_config_num == 0) ||
(pci_mmcfg_config == NULL) ||
(pci_mmcfg_config[0].base_address == 0))
(pci_mmcfg_config[0].address == 0))
return;
/* Only do this check when type 1 works. If it doesn't work
assume we run on a Mac and always use MCFG */
if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].base_address,
pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].address,
pci_mmcfg_config[0].address + MMCONFIG_APER_MIN,
E820_RESERVED)) {
printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
pci_mmcfg_config[0].base_address);
printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %lx is not E820-reserved\n",
(unsigned long)pci_mmcfg_config[0].address);
printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
return;
}

View File

@ -11,6 +11,8 @@ menu "Processor type and features"
config IA64
bool
select PCI if (!IA64_HP_SIM)
select ACPI if (!IA64_HP_SIM)
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
@ -28,7 +30,6 @@ config MMU
config SWIOTLB
bool
default y
config RWSEM_XCHGADD_ALGORITHM
bool
@ -84,10 +85,9 @@ choice
config IA64_GENERIC
bool "generic"
select ACPI
select PCI
select NUMA
select ACPI_NUMA
select SWIOTLB
help
This selects the system type of your hardware. A "generic" kernel
will run on any supported IA-64 system. However, if you configure
@ -104,6 +104,7 @@ config IA64_GENERIC
config IA64_DIG
bool "DIG-compliant"
select SWIOTLB
config IA64_HP_ZX1
bool "HP-zx1/sx1000"
@ -113,6 +114,7 @@ config IA64_HP_ZX1
config IA64_HP_ZX1_SWIOTLB
bool "HP-zx1/sx1000 with software I/O TLB"
select SWIOTLB
help
Build a kernel that runs on HP zx1 and sx1000 systems even when they
have broken PCI devices which cannot DMA to full 32 bits. Apart
@ -131,6 +133,7 @@ config IA64_SGI_SN2
config IA64_HP_SIM
bool "Ski-simulator"
select SWIOTLB
endchoice

View File

@ -192,3 +192,7 @@ EXPORT_SYMBOL(hwsw_unmap_sg);
EXPORT_SYMBOL(hwsw_dma_supported);
EXPORT_SYMBOL(hwsw_alloc_coherent);
EXPORT_SYMBOL(hwsw_free_coherent);
EXPORT_SYMBOL(hwsw_sync_single_for_cpu);
EXPORT_SYMBOL(hwsw_sync_single_for_device);
EXPORT_SYMBOL(hwsw_sync_sg_for_cpu);
EXPORT_SYMBOL(hwsw_sync_sg_for_device);

View File

@ -55,7 +55,7 @@
#define BAD_MADT_ENTRY(entry, end) ( \
(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
((acpi_table_entry_header *)entry)->length < sizeof(*entry))
((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
#define PREFIX "ACPI: "
@ -67,16 +67,11 @@ EXPORT_SYMBOL(pm_power_off);
unsigned int acpi_cpei_override;
unsigned int acpi_cpei_phys_cpuid;
#define MAX_SAPICS 256
u16 ia64_acpiid_to_sapicid[MAX_SAPICS] = {[0 ... MAX_SAPICS - 1] = -1 };
EXPORT_SYMBOL(ia64_acpiid_to_sapicid);
const char *acpi_get_sysname(void)
{
#ifdef CONFIG_IA64_GENERIC
unsigned long rsdp_phys;
struct acpi20_table_rsdp *rsdp;
struct acpi_table_rsdp *rsdp;
struct acpi_table_xsdt *xsdt;
struct acpi_table_header *hdr;
@ -87,16 +82,16 @@ const char *acpi_get_sysname(void)
return "dig";
}
rsdp = (struct acpi20_table_rsdp *)__va(rsdp_phys);
if (strncmp(rsdp->signature, RSDP_SIG, sizeof(RSDP_SIG) - 1)) {
rsdp = (struct acpi_table_rsdp *)__va(rsdp_phys);
if (strncmp(rsdp->signature, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1)) {
printk(KERN_ERR
"ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
return "dig";
}
xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_address);
xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_physical_address);
hdr = &xsdt->header;
if (strncmp(hdr->signature, XSDT_SIG, sizeof(XSDT_SIG) - 1)) {
if (strncmp(hdr->signature, ACPI_SIG_XSDT, sizeof(ACPI_SIG_XSDT) - 1)) {
printk(KERN_ERR
"ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
return "dig";
@ -169,12 +164,12 @@ struct acpi_table_madt *acpi_madt __initdata;
static u8 has_8259;
static int __init
acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
const unsigned long end)
{
struct acpi_table_lapic_addr_ovr *lapic;
struct acpi_madt_local_apic_override *lapic;
lapic = (struct acpi_table_lapic_addr_ovr *)header;
lapic = (struct acpi_madt_local_apic_override *)header;
if (BAD_MADT_ENTRY(lapic, end))
return -EINVAL;
@ -187,22 +182,19 @@ acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
}
static int __init
acpi_parse_lsapic(acpi_table_entry_header * header, const unsigned long end)
acpi_parse_lsapic(struct acpi_subtable_header * header, const unsigned long end)
{
struct acpi_table_lsapic *lsapic;
struct acpi_madt_local_sapic *lsapic;
lsapic = (struct acpi_table_lsapic *)header;
lsapic = (struct acpi_madt_local_sapic *)header;
if (BAD_MADT_ENTRY(lsapic, end))
return -EINVAL;
/*Skip BAD_MADT_ENTRY check, as lsapic size could vary */
if (lsapic->flags.enabled) {
if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
#ifdef CONFIG_SMP
smp_boot_data.cpu_phys_id[available_cpus] =
(lsapic->id << 8) | lsapic->eid;
#endif
ia64_acpiid_to_sapicid[lsapic->acpi_id] =
(lsapic->id << 8) | lsapic->eid;
++available_cpus;
}
@ -211,11 +203,11 @@ acpi_parse_lsapic(acpi_table_entry_header * header, const unsigned long end)
}
static int __init
acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
{
struct acpi_table_lapic_nmi *lacpi_nmi;
struct acpi_madt_local_apic_nmi *lacpi_nmi;
lacpi_nmi = (struct acpi_table_lapic_nmi *)header;
lacpi_nmi = (struct acpi_madt_local_apic_nmi *)header;
if (BAD_MADT_ENTRY(lacpi_nmi, end))
return -EINVAL;
@ -225,11 +217,11 @@ acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
}
static int __init
acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end)
acpi_parse_iosapic(struct acpi_subtable_header * header, const unsigned long end)
{
struct acpi_table_iosapic *iosapic;
struct acpi_madt_io_sapic *iosapic;
iosapic = (struct acpi_table_iosapic *)header;
iosapic = (struct acpi_madt_io_sapic *)header;
if (BAD_MADT_ENTRY(iosapic, end))
return -EINVAL;
@ -240,13 +232,13 @@ acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end)
static unsigned int __initdata acpi_madt_rev;
static int __init
acpi_parse_plat_int_src(acpi_table_entry_header * header,
acpi_parse_plat_int_src(struct acpi_subtable_header * header,
const unsigned long end)
{
struct acpi_table_plat_int_src *plintsrc;
struct acpi_madt_interrupt_source *plintsrc;
int vector;
plintsrc = (struct acpi_table_plat_int_src *)header;
plintsrc = (struct acpi_madt_interrupt_source *)header;
if (BAD_MADT_ENTRY(plintsrc, end))
return -EINVAL;
@ -257,19 +249,19 @@ acpi_parse_plat_int_src(acpi_table_entry_header * header,
*/
vector = iosapic_register_platform_intr(plintsrc->type,
plintsrc->global_irq,
plintsrc->iosapic_vector,
plintsrc->io_sapic_vector,
plintsrc->eid,
plintsrc->id,
(plintsrc->flags.polarity ==
1) ? IOSAPIC_POL_HIGH :
IOSAPIC_POL_LOW,
(plintsrc->flags.trigger ==
1) ? IOSAPIC_EDGE :
IOSAPIC_LEVEL);
((plintsrc->inti_flags & ACPI_MADT_POLARITY_MASK) ==
ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
((plintsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
ACPI_MADT_TRIGGER_EDGE) ?
IOSAPIC_EDGE : IOSAPIC_LEVEL);
platform_intr_list[plintsrc->type] = vector;
if (acpi_madt_rev > 1) {
acpi_cpei_override = plintsrc->plint_flags.cpei_override_flag;
acpi_cpei_override = plintsrc->flags & ACPI_MADT_CPEI_OVERRIDE;
}
/*
@ -324,30 +316,32 @@ unsigned int get_cpei_target_cpu(void)
}
static int __init
acpi_parse_int_src_ovr(acpi_table_entry_header * header,
acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
const unsigned long end)
{
struct acpi_table_int_src_ovr *p;
struct acpi_madt_interrupt_override *p;
p = (struct acpi_table_int_src_ovr *)header;
p = (struct acpi_madt_interrupt_override *)header;
if (BAD_MADT_ENTRY(p, end))
return -EINVAL;
iosapic_override_isa_irq(p->bus_irq, p->global_irq,
(p->flags.polarity ==
1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
(p->flags.trigger ==
1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
iosapic_override_isa_irq(p->source_irq, p->global_irq,
((p->inti_flags & ACPI_MADT_POLARITY_MASK) ==
ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
((p->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
ACPI_MADT_TRIGGER_EDGE) ?
IOSAPIC_EDGE : IOSAPIC_LEVEL);
return 0;
}
static int __init
acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
{
struct acpi_table_nmi_src *nmi_src;
struct acpi_madt_nmi_source *nmi_src;
nmi_src = (struct acpi_table_nmi_src *)header;
nmi_src = (struct acpi_madt_nmi_source *)header;
if (BAD_MADT_ENTRY(nmi_src, end))
return -EINVAL;
@ -371,12 +365,12 @@ static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
}
}
static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
static int __init acpi_parse_madt(struct acpi_table_header *table)
{
if (!phys_addr || !size)
if (!table)
return -EINVAL;
acpi_madt = (struct acpi_table_madt *)__va(phys_addr);
acpi_madt = (struct acpi_table_madt *)table;
acpi_madt_rev = acpi_madt->header.revision;
@ -384,14 +378,14 @@ static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
#ifdef CONFIG_ITANIUM
has_8259 = 1; /* Firmware on old Itanium systems is broken */
#else
has_8259 = acpi_madt->flags.pcat_compat;
has_8259 = acpi_madt->flags & ACPI_MADT_PCAT_COMPAT;
#endif
iosapic_system_init(has_8259);
/* Get base address of IPI Message Block */
if (acpi_madt->lapic_address)
ipi_base_addr = ioremap(acpi_madt->lapic_address, 0);
if (acpi_madt->address)
ipi_base_addr = ioremap(acpi_madt->address, 0);
printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr);
@ -413,23 +407,24 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
#define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag))
static struct acpi_table_slit __initdata *slit_table;
static int get_processor_proximity_domain(struct acpi_table_processor_affinity *pa)
static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
{
int pxm;
pxm = pa->proximity_domain;
pxm = pa->proximity_domain_lo;
if (ia64_platform_is("sn2"))
pxm += pa->reserved[0] << 8;
pxm += pa->proximity_domain_hi[0] << 8;
return pxm;
}
static int get_memory_proximity_domain(struct acpi_table_memory_affinity *ma)
static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
{
int pxm;
pxm = ma->proximity_domain;
if (ia64_platform_is("sn2"))
pxm += ma->reserved1[0] << 8;
if (!ia64_platform_is("sn2"))
pxm &= 0xff;
return pxm;
}
@ -442,7 +437,7 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
u32 len;
len = sizeof(struct acpi_table_header) + 8
+ slit->localities * slit->localities;
+ slit->locality_count * slit->locality_count;
if (slit->header.length != len) {
printk(KERN_ERR
"ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
@ -454,11 +449,11 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
}
void __init
acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
int pxm;
if (!pa->flags.enabled)
if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
return;
pxm = get_processor_proximity_domain(pa);
@ -467,14 +462,14 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
pxm_bit_set(pxm);
node_cpuid[srat_num_cpus].phys_id =
(pa->apic_id << 8) | (pa->lsapic_eid);
(pa->apic_id << 8) | (pa->local_sapic_eid);
/* nid should be overridden as logical node id later */
node_cpuid[srat_num_cpus].nid = pxm;
srat_num_cpus++;
}
void __init
acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
unsigned long paddr, size;
int pxm;
@ -483,13 +478,11 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
pxm = get_memory_proximity_domain(ma);
/* fill node memory chunk structure */
paddr = ma->base_addr_hi;
paddr = (paddr << 32) | ma->base_addr_lo;
size = ma->length_hi;
size = (size << 32) | ma->length_lo;
paddr = ma->base_address;
size = ma->length;
/* Ignore disabled entries */
if (!ma->flags.enabled)
if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
return;
/* record this node in proximity bitmap */
@ -560,16 +553,16 @@ void __init acpi_numa_arch_fixup(void)
if (!slit_table)
return;
memset(numa_slit, -1, sizeof(numa_slit));
for (i = 0; i < slit_table->localities; i++) {
for (i = 0; i < slit_table->locality_count; i++) {
if (!pxm_bit_test(i))
continue;
node_from = pxm_to_node(i);
for (j = 0; j < slit_table->localities; j++) {
for (j = 0; j < slit_table->locality_count; j++) {
if (!pxm_bit_test(j))
continue;
node_to = pxm_to_node(j);
node_distance(node_from, node_to) =
slit_table->entry[i * slit_table->localities + j];
slit_table->entry[i * slit_table->locality_count + j];
}
}
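For reference, the SLIT entry[] array walked above is the ACPI locality matrix stored row-major: with locality_count = 2 it holds {d(0,0), d(0,1), d(1,0), d(1,1)}, so entry[i * locality_count + j] is the relative distance from locality i to locality j.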
@ -617,21 +610,21 @@ void acpi_unregister_gsi(u32 gsi)
EXPORT_SYMBOL(acpi_unregister_gsi);
static int __init acpi_parse_fadt(unsigned long phys_addr, unsigned long size)
static int __init acpi_parse_fadt(struct acpi_table_header *table)
{
struct acpi_table_header *fadt_header;
struct fadt_descriptor *fadt;
struct acpi_table_fadt *fadt;
if (!phys_addr || !size)
if (!table)
return -EINVAL;
fadt_header = (struct acpi_table_header *)__va(phys_addr);
fadt_header = (struct acpi_table_header *)table;
if (fadt_header->revision != 3)
return -ENODEV; /* Only deal with ACPI 2.0 FADT */
fadt = (struct fadt_descriptor *)fadt_header;
fadt = (struct acpi_table_fadt *)fadt_header;
acpi_register_gsi(fadt->sci_int, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
acpi_register_gsi(fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
return 0;
}
@ -658,7 +651,7 @@ int __init acpi_boot_init(void)
* information -- the successor to MPS tables.
*/
if (acpi_table_parse(ACPI_APIC, acpi_parse_madt) < 1) {
if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt) < 1) {
printk(KERN_ERR PREFIX "Can't find MADT\n");
goto skip_madt;
}
@ -666,40 +659,40 @@ int __init acpi_boot_init(void)
/* Local APIC */
if (acpi_table_parse_madt
(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0) < 0)
(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0) < 0)
printk(KERN_ERR PREFIX
"Error parsing LAPIC address override entry\n");
if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic, NR_CPUS)
if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_lsapic, NR_CPUS)
< 1)
printk(KERN_ERR PREFIX
"Error parsing MADT - no LAPIC entries\n");
if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0)
if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0)
< 0)
printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
/* I/O APIC */
if (acpi_table_parse_madt
(ACPI_MADT_IOSAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
(ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
printk(KERN_ERR PREFIX
"Error parsing MADT - no IOSAPIC entries\n");
/* System-Level Interrupt Routing */
if (acpi_table_parse_madt
(ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src,
(ACPI_MADT_TYPE_INTERRUPT_SOURCE, acpi_parse_plat_int_src,
ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
printk(KERN_ERR PREFIX
"Error parsing platform interrupt source entry\n");
if (acpi_table_parse_madt
(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 0) < 0)
(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, 0) < 0)
printk(KERN_ERR PREFIX
"Error parsing interrupt source overrides entry\n");
if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, 0) < 0)
if (acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, 0) < 0)
printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
skip_madt:
@ -709,7 +702,7 @@ int __init acpi_boot_init(void)
* gets interrupts such as power and sleep buttons. If it's not
* on a Legacy interrupt, it needs to be setup.
*/
if (acpi_table_parse(ACPI_FADT, acpi_parse_fadt) < 1)
if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt) < 1)
printk(KERN_ERR PREFIX "Can't find FADT\n");
#ifdef CONFIG_SMP
@ -842,7 +835,7 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
struct acpi_table_lsapic *lsapic;
struct acpi_madt_local_sapic *lsapic;
cpumask_t tmp_map;
long physid;
int cpu;
@ -854,16 +847,16 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
return -EINVAL;
obj = buffer.pointer;
if (obj->type != ACPI_TYPE_BUFFER ||
obj->buffer.length < sizeof(*lsapic)) {
if (obj->type != ACPI_TYPE_BUFFER)
{
kfree(buffer.pointer);
return -EINVAL;
}
lsapic = (struct acpi_table_lsapic *)obj->buffer.pointer;
lsapic = (struct acpi_madt_local_sapic *)obj->buffer.pointer;
if ((lsapic->header.type != ACPI_MADT_LSAPIC) ||
(!lsapic->flags.enabled)) {
if ((lsapic->header.type != ACPI_MADT_TYPE_LOCAL_SAPIC) ||
(!lsapic->lapic_flags & ACPI_MADT_ENABLED)) {
kfree(buffer.pointer);
return -EINVAL;
}
@ -883,7 +876,6 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
cpu_set(cpu, cpu_present_map);
ia64_cpu_to_sapicid[cpu] = physid;
ia64_acpiid_to_sapicid[lsapic->acpi_id] = ia64_cpu_to_sapicid[cpu];
*pcpu = cpu;
return (0);
@ -893,14 +885,6 @@ EXPORT_SYMBOL(acpi_map_lsapic);
int acpi_unmap_lsapic(int cpu)
{
int i;
for (i = 0; i < MAX_SAPICS; i++) {
if (ia64_acpiid_to_sapicid[i] == ia64_cpu_to_sapicid[cpu]) {
ia64_acpiid_to_sapicid[i] = -1;
break;
}
}
ia64_cpu_to_sapicid[cpu] = -1;
cpu_clear(cpu, cpu_present_map);
@ -920,7 +904,7 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
struct acpi_table_iosapic *iosapic;
struct acpi_madt_io_sapic *iosapic;
unsigned int gsi_base;
int pxm, node;
@ -938,9 +922,9 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
return AE_OK;
}
iosapic = (struct acpi_table_iosapic *)obj->buffer.pointer;
iosapic = (struct acpi_madt_io_sapic *)obj->buffer.pointer;
if (iosapic->header.type != ACPI_MADT_IOSAPIC) {
if (iosapic->header.type != ACPI_MADT_TYPE_IO_SAPIC) {
kfree(buffer.pointer);
return AE_OK;
}

View File

@ -79,6 +79,7 @@ crash_save_this_cpu()
final_note(buf);
}
#ifdef CONFIG_SMP
static int
kdump_wait_cpu_freeze(void)
{
@ -91,6 +92,7 @@ kdump_wait_cpu_freeze(void)
}
return 1;
}
#endif
void
machine_crash_shutdown(struct pt_regs *pt)
@ -116,6 +118,11 @@ machine_crash_shutdown(struct pt_regs *pt)
static void
machine_kdump_on_init(void)
{
if (!ia64_kimage) {
printk(KERN_NOTICE "machine_kdump_on_init(): "
"kdump not configured\n");
return;
}
local_irq_disable();
kexec_disable_iosapic();
machine_kexec(ia64_kimage);
@ -132,11 +139,12 @@ kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
atomic_inc(&kdump_cpu_freezed);
kdump_status[cpuid] = 1;
mb();
if (cpuid == 0) {
#ifdef CONFIG_HOTPLUG_CPU
if (cpuid != 0)
ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]);
#endif
for (;;)
cpu_relax();
} else
ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]);
}
static int

View File

@ -9,7 +9,8 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/uaccess.h>
/**
* copy_oldmem_page - copy one page from "oldmem"

View File

@ -380,7 +380,7 @@ efi_get_pal_addr (void)
#endif
return __va(md->phys_addr);
}
printk(KERN_WARNING "%s: no PAL-code memory-descriptor found",
printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
__FUNCTION__);
return NULL;
}

View File

@ -1610,5 +1610,7 @@ sys_call_table:
data8 sys_sync_file_range // 1300
data8 sys_tee
data8 sys_vmsplice
data8 sys_ni_syscall // reserved for move_pages
data8 sys_getcpu
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls

View File

@ -925,6 +925,11 @@ iosapic_unregister_intr (unsigned int gsi)
/* Clear the interrupt controller descriptor */
idesc->chip = &no_irq_type;
#ifdef CONFIG_SMP
/* Clear affinity */
cpus_setall(idesc->affinity);
#endif
/* Clear the interrupt information */
memset(&iosapic_intr_info[vector], 0,
sizeof(struct iosapic_intr_info));

View File

@ -14,6 +14,7 @@
#include <linux/kexec.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/efi.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/delay.h>
@ -68,22 +69,10 @@ void machine_kexec_cleanup(struct kimage *image)
{
}
void machine_shutdown(void)
{
int cpu;
for_each_online_cpu(cpu) {
if (cpu != smp_processor_id())
cpu_down(cpu);
}
kexec_disable_iosapic();
}
/*
* Do not allocate memory (or fail in any way) in machine_kexec().
* We are past the point of no return, committed to rebooting now.
*/
extern void *efi_get_pal_addr(void);
static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
{
struct kimage *image = arg;
@ -93,6 +82,7 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
unsigned long vector;
int ii;
BUG_ON(!image);
if (image->type == KEXEC_TYPE_CRASH) {
crash_save_this_cpu();
current->thread.ksp = (__u64)info->sw - 16;
@ -131,6 +121,7 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
void machine_kexec(struct kimage *image)
{
BUG_ON(!image);
unw_init_running(ia64_machine_kexec, image);
for(;;);
}

View File

@ -64,12 +64,17 @@ static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
}
#endif /* CONFIG_SMP */
int ia64_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
struct msi_msg msg;
unsigned long dest_phys_id;
unsigned int vector;
unsigned int irq, vector;
irq = create_irq();
if (irq < 0)
return irq;
set_irq_msi(irq, desc);
dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map));
vector = irq;
@ -89,12 +94,12 @@ int ia64_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
write_msi_msg(irq, &msg);
set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);
return 0;
return irq;
}
void ia64_teardown_msi_irq(unsigned int irq)
{
return; /* no-op */
destroy_irq(irq);
}
static void ia64_ack_msi_irq(unsigned int irq)
@ -126,12 +131,12 @@ static struct irq_chip ia64_msi_chip = {
};
int arch_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
if (platform_setup_msi_irq)
return platform_setup_msi_irq(irq, pdev);
return platform_setup_msi_irq(pdev, desc);
return ia64_setup_msi_irq(irq, pdev);
return ia64_setup_msi_irq(pdev, desc);
}
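The net effect of this change is that the arch hook now allocates the Linux irq itself (via create_irq()) and returns it, or a negative errno on failure, rather than being handed a pre-assigned irq. A minimal caller sketch under the new contract (the surrounding caller code is hypothetical):

        int irq = arch_setup_msi_irq(pdev, desc);
        if (irq < 0)
                return irq;     /* propagate the error */
        /* ... on teardown, arch_teardown_msi_irq(irq) now also destroys the irq */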
void arch_teardown_msi_irq(unsigned int irq)

View File

@ -34,6 +34,7 @@
#include <asm/ia32.h>
#include <asm/irq.h>
#include <asm/kdebug.h>
#include <asm/kexec.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sal.h>
@ -803,6 +804,21 @@ cpu_halt (void)
ia64_pal_halt(min_power_state);
}
void machine_shutdown(void)
{
#ifdef CONFIG_HOTPLUG_CPU
int cpu;
for_each_online_cpu(cpu) {
if (cpu != smp_processor_id())
cpu_down(cpu);
}
#endif
#ifdef CONFIG_KEXEC
kexec_disable_iosapic();
#endif
}
void
machine_restart (char *restart_cmd)
{

View File

@ -607,7 +607,7 @@ find_thread_for_addr (struct task_struct *child, unsigned long addr)
*/
list_for_each_safe(this, next, &current->children) {
p = list_entry(this, struct task_struct, sibling);
if (p->mm != mm)
if (p->tgid != child->tgid)
continue;
if (thread_matches(p, addr)) {
child = p;
@ -1405,6 +1405,7 @@ ptrace_disable (struct task_struct *child)
struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
/* make sure the single step/taken-branch trap bits are not set: */
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
child_psr->ss = 0;
child_psr->tb = 0;
}
@ -1525,6 +1526,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
* Make sure the single step/taken-branch trap bits
* are not set:
*/
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
ia64_psr(pt)->ss = 0;
ia64_psr(pt)->tb = 0;
@ -1556,6 +1558,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
goto out_tsk;
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
set_tsk_thread_flag(child, TIF_SINGLESTEP);
if (request == PTRACE_SINGLESTEP) {
ia64_psr(pt)->ss = 1;
} else {
@ -1595,13 +1598,9 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
}
void
static void
syscall_trace (void)
{
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return;
if (!(current->ptrace & PT_PTRACED))
return;
/*
* The 0x80 provides a way for the tracing parent to
* distinguish between a syscall stop and SIGTRAP delivery.
@ -1664,7 +1663,8 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
audit_syscall_exit(success, result);
}
if (test_thread_flag(TIF_SYSCALL_TRACE)
if ((test_thread_flag(TIF_SYSCALL_TRACE)
|| test_thread_flag(TIF_SINGLESTEP))
&& (current->ptrace & PT_PTRACED))
syscall_trace();
}

View File

@ -569,34 +569,31 @@ show_cpuinfo (struct seq_file *m, void *v)
{ 1UL << 1, "spontaneous deferral"},
{ 1UL << 2, "16-byte atomic ops" }
};
char features[128], *cp, sep;
char features[128], *cp, *sep;
struct cpuinfo_ia64 *c = v;
unsigned long mask;
unsigned long proc_freq;
int i;
int i, size;
mask = c->features;
/* build the feature string: */
memcpy(features, " standard", 10);
memcpy(features, "standard", 9);
cp = features;
sep = 0;
for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
size = sizeof(features);
sep = "";
for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
if (mask & feature_bits[i].mask) {
if (sep)
*cp++ = sep;
sep = ',';
*cp++ = ' ';
strcpy(cp, feature_bits[i].feature_name);
cp += strlen(feature_bits[i].feature_name);
cp += snprintf(cp, size, "%s%s", sep,
feature_bits[i].feature_name),
sep = ", ";
mask &= ~feature_bits[i].mask;
size = sizeof(features) - (cp - features);
}
}
if (mask) {
/* print unknown features as a hex value: */
if (sep)
*cp++ = sep;
sprintf(cp, " 0x%lx", mask);
if (mask && size > 1) {
/* print unknown features as a hex value */
snprintf(cp, size, "%s0x%lx", sep, mask);
}
proc_freq = cpufreq_quick_get(cpunum);
@ -612,7 +609,7 @@ show_cpuinfo (struct seq_file *m, void *v)
"model name : %s\n"
"revision : %u\n"
"archrev : %u\n"
"features :%s\n" /* don't change this---it _is_ right! */
"features : %s\n"
"cpu number : %lu\n"
"cpu regs : %u\n"
"cpu MHz : %lu.%06lu\n"

View File

@ -157,6 +157,7 @@ SECTIONS
}
#endif
. = ALIGN(8);
__con_initcall_start = .;
.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET)
{ *(.con_initcall.init) }

View File

@ -30,47 +30,69 @@ static unsigned long max_gap;
#endif
/**
* show_mem - display a memory statistics summary
* show_mem - give short summary of memory stats
*
* Just walks the pages in the system and describes where they're allocated.
* Shows a simple page count of reserved and used pages in the system.
* For discontig machines, it does this on a per-pgdat basis.
*/
void
show_mem (void)
void show_mem(void)
{
int i, total = 0, reserved = 0;
int shared = 0, cached = 0;
int i, total_reserved = 0;
int total_shared = 0, total_cached = 0;
unsigned long total_present = 0;
pg_data_t *pgdat;
printk(KERN_INFO "Mem-info:\n");
show_free_areas();
printk(KERN_INFO "Free swap: %6ldkB\n",
nr_swap_pages<<(PAGE_SHIFT-10));
i = max_mapnr;
for (i = 0; i < max_mapnr; i++) {
if (!pfn_valid(i)) {
printk(KERN_INFO "Node memory in pages:\n");
for_each_online_pgdat(pgdat) {
unsigned long present;
unsigned long flags;
int shared = 0, cached = 0, reserved = 0;
pgdat_resize_lock(pgdat, &flags);
present = pgdat->node_present_pages;
for(i = 0; i < pgdat->node_spanned_pages; i++) {
struct page *page;
if (pfn_valid(pgdat->node_start_pfn + i))
page = pfn_to_page(pgdat->node_start_pfn + i);
else {
#ifdef CONFIG_VIRTUAL_MEM_MAP
if (max_gap < LARGE_GAP)
continue;
i = vmemmap_find_next_valid_pfn(0, i) - 1;
#endif
i = vmemmap_find_next_valid_pfn(pgdat->node_id,
i) - 1;
continue;
}
total++;
if (PageReserved(mem_map+i))
if (PageReserved(page))
reserved++;
else if (PageSwapCache(mem_map+i))
else if (PageSwapCache(page))
cached++;
else if (page_count(mem_map + i))
shared += page_count(mem_map + i) - 1;
else if (page_count(page))
shared += page_count(page)-1;
}
printk(KERN_INFO "%d pages of RAM\n", total);
printk(KERN_INFO "%d reserved pages\n", reserved);
printk(KERN_INFO "%d pages shared\n", shared);
printk(KERN_INFO "%d pages swap cached\n", cached);
printk(KERN_INFO "%ld pages in page table cache\n",
pgdat_resize_unlock(pgdat, &flags);
total_present += present;
total_reserved += reserved;
total_cached += cached;
total_shared += shared;
printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, "
"shrd: %10d, swpd: %10d\n", pgdat->node_id,
present, reserved, shared, cached);
}
printk(KERN_INFO "%ld pages of RAM\n", total_present);
printk(KERN_INFO "%d reserved pages\n", total_reserved);
printk(KERN_INFO "%d pages shared\n", total_shared);
printk(KERN_INFO "%d pages swap cached\n", total_cached);
printk(KERN_INFO "Total of %ld pages in page table cache\n",
pgtable_quicklist_total_size());
printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
}
/* physical address where the bootmem map is located */
unsigned long bootmap_start;
@ -177,7 +199,7 @@ find_memory (void)
#ifdef CONFIG_CRASH_DUMP
/* If we are doing a crash dump, we still need to know the real mem
* size before original memory map is * reset. */
* size before original memory map is reset. */
saved_max_pfn = max_pfn;
#endif
}

View File

@ -412,37 +412,6 @@ static void __init memory_less_nodes(void)
return;
}
#ifdef CONFIG_SPARSEMEM
/**
* register_sparse_mem - notify SPARSEMEM that this memory range exists.
* @start: physical start of range
* @end: physical end of range
* @arg: unused
*
* Simply calls SPARSEMEM to register memory section(s).
*/
static int __init register_sparse_mem(unsigned long start, unsigned long end,
void *arg)
{
int nid;
start = __pa(start) >> PAGE_SHIFT;
end = __pa(end) >> PAGE_SHIFT;
nid = early_pfn_to_nid(start);
memory_present(nid, start, end);
return 0;
}
static void __init arch_sparse_init(void)
{
efi_memmap_walk(register_sparse_mem, NULL);
sparse_init();
}
#else
#define arch_sparse_init() do {} while (0)
#endif
/**
* find_memory - walk the EFI memory map and setup the bootmem allocator
*
@ -473,6 +442,9 @@ void __init find_memory(void)
node_clear(node, memory_less_mask);
mem_data[node].min_pfn = ~0UL;
}
efi_memmap_walk(register_active_ranges, NULL);
/*
* Initialize the boot memory maps in reverse order since that's
* what the bootmem allocator expects
@ -506,6 +478,12 @@ void __init find_memory(void)
max_pfn = max_low_pfn;
find_initrd();
#ifdef CONFIG_CRASH_DUMP
/* If we are doing a crash dump, we still need to know the real mem
* size before original memory map is reset. */
saved_max_pfn = max_pfn;
#endif
}
#ifdef CONFIG_SMP
@ -654,7 +632,6 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
{
unsigned long end = start + len;
add_active_range(node, start >> PAGE_SHIFT, end >> PAGE_SHIFT);
mem_data[node].num_physpages += len >> PAGE_SHIFT;
if (start <= __pa(MAX_DMA_ADDRESS))
mem_data[node].num_dma_physpages +=
@ -686,10 +663,11 @@ void __init paging_init(void)
max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
arch_sparse_init();
efi_memmap_walk(filter_rsvd_memory, count_node_pages);
sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init();
#ifdef CONFIG_VIRTUAL_MEM_MAP
vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
sizeof(struct page));

View File

@ -19,6 +19,7 @@
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>
#include <asm/a.out.h>
#include <asm/dma.h>
@ -128,6 +129,25 @@ lazy_mmu_prot_update (pte_t pte)
set_bit(PG_arch_1, &page->flags); /* mark page as clean */
}
/*
* Since DMA is i-cache coherent, any (complete) pages that were written via
* DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
* flush them when they get mapped into an executable vm-area.
*/
void
dma_mark_clean(void *addr, size_t size)
{
unsigned long pg_addr, end;
pg_addr = PAGE_ALIGN((unsigned long) addr);
end = (unsigned long) addr + size;
while (pg_addr + PAGE_SIZE <= end) {
struct page *page = virt_to_page(pg_addr);
set_bit(PG_arch_1, &page->flags);
pg_addr += PAGE_SIZE;
}
}
inline void
ia64_set_rbs_bot (void)
{
@ -595,13 +615,27 @@ find_largest_hole (u64 start, u64 end, void *arg)
return 0;
}
#endif /* CONFIG_VIRTUAL_MEM_MAP */
int __init
register_active_ranges(u64 start, u64 end, void *arg)
{
add_active_range(0, __pa(start) >> PAGE_SHIFT, __pa(end) >> PAGE_SHIFT);
int nid = paddr_to_nid(__pa(start));
if (nid < 0)
nid = 0;
#ifdef CONFIG_KEXEC
if (start > crashk_res.start && start < crashk_res.end)
start = crashk_res.end;
if (end > crashk_res.start && end < crashk_res.end)
end = crashk_res.start;
#endif
if (start < end)
add_active_range(nid, __pa(start) >> PAGE_SHIFT,
__pa(end) >> PAGE_SHIFT);
return 0;
}
#endif /* CONFIG_VIRTUAL_MEM_MAP */
static int __init
count_reserved_pages (u64 start, u64 end, void *arg)

View File

@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 1992 - 1997, 2000,2002-2007 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
@ -38,10 +38,18 @@ static irqreturn_t hub_eint_handler(int irq, void *arg)
(u64) nasid, 0, 0, 0, 0, 0, 0);
if ((int)ret_stuff.v0)
panic("hubii_eint_handler(): Fatal TIO Error");
panic("%s: Fatal %s Error", __FUNCTION__,
((nasid & 1) ? "TIO" : "HUBII"));
if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
(void)hubiio_crb_error_handler(hubdev_info);
} else
if (nasid & 1) { /* TIO errors */
SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
(u64) nasid, 0, 0, 0, 0, 0, 0);
if ((int)ret_stuff.v0)
panic("%s: Fatal TIO Error", __FUNCTION__);
} else
bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));

View File

@ -13,6 +13,7 @@
#include <asm/sn/sn_sal.h>
#include "xtalk/hubdev.h"
#include <linux/acpi.h>
#include <acpi/acnamesp.h>
/*
@ -31,6 +32,12 @@ struct acpi_vendor_uuid sn_uuid = {
0xa2, 0x7c, 0x08, 0x00, 0x69, 0x13, 0xea, 0x51 },
};
struct sn_pcidev_match {
u8 bus;
unsigned int devfn;
acpi_handle handle;
};
/*
* Perform the early IO init in PROM.
*/
@ -119,9 +126,11 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
&sn_uuid, &buffer);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR "get_acpi_pcibus_ptr: "
"get_acpi_bussoft_info() failed: %d\n",
status);
printk(KERN_ERR "%s: "
"acpi_get_vendor_resource() failed (0x%x) for: ",
__FUNCTION__, status);
acpi_ns_print_node_pathname(handle, NULL);
printk("\n");
return NULL;
}
resource = buffer.pointer;
@ -130,8 +139,8 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
sizeof(struct pcibus_bussoft *)) {
printk(KERN_ERR
"get_acpi_bussoft_ptr: Invalid vendor data "
"length %d\n", vendor->byte_length);
"%s: Invalid vendor data length %d\n",
__FUNCTION__, vendor->byte_length);
kfree(buffer.pointer);
return NULL;
}
@ -143,34 +152,254 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
}
/*
* sn_acpi_bus_fixup
* sn_extract_device_info - Extract the pcidev_info and the sn_irq_info
* pointers from the vendor resource using the
* provided acpi handle, and copy the structures
* into the argument buffers.
*/
void
sn_acpi_bus_fixup(struct pci_bus *bus)
static int
sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info,
struct sn_irq_info **sn_irq_info)
{
struct pci_dev *pci_dev = NULL;
struct pcibus_bussoft *prom_bussoft_ptr;
extern void sn_common_bus_fixup(struct pci_bus *,
struct pcibus_bussoft *);
u64 addr;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct sn_irq_info *irq_info, *irq_info_prom;
struct pcidev_info *pcidev_ptr, *pcidev_prom_ptr;
struct acpi_resource *resource;
int ret = 0;
acpi_status status;
struct acpi_resource_vendor_typed *vendor;
if (!bus->parent) { /* If root bus */
prom_bussoft_ptr = sn_get_bussoft_ptr(bus);
if (prom_bussoft_ptr == NULL) {
/*
* The pointer to this device's pcidev_info structure in
* the PROM is in the vendor resource.
*/
status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
&sn_uuid, &buffer);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR
"sn_pci_fixup_bus: 0x%04x:0x%02x Unable to "
"obtain prom_bussoft_ptr\n",
pci_domain_nr(bus), bus->number);
return;
"%s: acpi_get_vendor_resource() failed (0x%x) for: ",
__FUNCTION__, status);
acpi_ns_print_node_pathname(handle, NULL);
printk("\n");
return 1;
}
sn_common_bus_fixup(bus, prom_bussoft_ptr);
resource = buffer.pointer;
vendor = &resource->data.vendor_typed;
if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
sizeof(struct pci_devdev_info *)) {
printk(KERN_ERR
"%s: Invalid vendor data length: %d for: ",
__FUNCTION__, vendor->byte_length);
acpi_ns_print_node_pathname(handle, NULL);
printk("\n");
ret = 1;
goto exit;
}
list_for_each_entry(pci_dev, &bus->devices, bus_list) {
sn_pci_fixup_slot(pci_dev);
pcidev_ptr = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
if (!pcidev_ptr)
panic("%s: Unable to alloc memory for pcidev_info", __FUNCTION__);
memcpy(&addr, vendor->byte_data, sizeof(struct pcidev_info *));
pcidev_prom_ptr = __va(addr);
memcpy(pcidev_ptr, pcidev_prom_ptr, sizeof(struct pcidev_info));
/* Get the IRQ info */
irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
if (!irq_info)
panic("%s: Unable to alloc memory for sn_irq_info", __FUNCTION__);
if (pcidev_ptr->pdi_sn_irq_info) {
irq_info_prom = __va(pcidev_ptr->pdi_sn_irq_info);
memcpy(irq_info, irq_info_prom, sizeof(struct sn_irq_info));
}
*pcidev_info = pcidev_ptr;
*sn_irq_info = irq_info;
exit:
kfree(buffer.pointer);
return ret;
}
static unsigned int
get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle)
{
unsigned long adr;
acpi_handle child;
unsigned int devfn;
int function;
acpi_handle parent;
int slot;
acpi_status status;
/*
* Do an upward search to find the root bus device, and
* obtain the host devfn from the previous child device.
*/
child = device_handle;
while (child) {
status = acpi_get_parent(child, &parent);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR "%s: acpi_get_parent() failed "
"(0x%x) for: ", __FUNCTION__, status);
acpi_ns_print_node_pathname(child, NULL);
printk("\n");
panic("%s: Unable to find host devfn\n", __FUNCTION__);
}
if (parent == rootbus_handle)
break;
child = parent;
}
if (!child) {
printk(KERN_ERR "%s: Unable to find root bus for: ",
__FUNCTION__);
acpi_ns_print_node_pathname(device_handle, NULL);
printk("\n");
BUG();
}
status = acpi_evaluate_integer(child, METHOD_NAME__ADR, NULL, &adr);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR "%s: Unable to get _ADR (0x%x) for: ",
__FUNCTION__, status);
acpi_ns_print_node_pathname(child, NULL);
printk("\n");
panic("%s: Unable to find host devfn\n", __FUNCTION__);
}
slot = (adr >> 16) & 0xffff;
function = adr & 0xffff;
devfn = PCI_DEVFN(slot, function);
return devfn;
}
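For reference, the _ADR value decoded above packs the PCI slot in its upper 16 bits and the function in its lower 16 bits. A small worked example (the value is illustrative):

        unsigned long adr = 0x00030001;                 /* hypothetical _ADR */
        int slot = (adr >> 16) & 0xffff;                /* 3 */
        int function = adr & 0xffff;                    /* 1 */
        unsigned int devfn = PCI_DEVFN(slot, function); /* (3 << 3) | 1 = 25 */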
/*
* sn_acpi_slot_fixup - Perform any SN specific slot fixup.
* find_matching_device - Callback routine to find the ACPI device
* that matches up with our pci_dev device.
* Matching is done on bus number and devfn.
* To find the bus number for a particular
* ACPI device, we must look at the _BBN method
* of its parent.
*/
static acpi_status
find_matching_device(acpi_handle handle, u32 lvl, void *context, void **rv)
{
unsigned long bbn = -1;
unsigned long adr;
acpi_handle parent = NULL;
acpi_status status;
unsigned int devfn;
int function;
int slot;
struct sn_pcidev_match *info = context;
status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
&adr);
if (ACPI_SUCCESS(status)) {
status = acpi_get_parent(handle, &parent);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR
"%s: acpi_get_parent() failed (0x%x) for: ",
__FUNCTION__, status);
acpi_ns_print_node_pathname(handle, NULL);
printk("\n");
return AE_OK;
}
status = acpi_evaluate_integer(parent, METHOD_NAME__BBN,
NULL, &bbn);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR
"%s: Failed to find _BBN in parent of: ",
__FUNCTION__);
acpi_ns_print_node_pathname(handle, NULL);
printk("\n");
return AE_OK;
}
slot = (adr >> 16) & 0xffff;
function = adr & 0xffff;
devfn = PCI_DEVFN(slot, function);
if ((info->devfn == devfn) && (info->bus == bbn)) {
/* We have a match! */
info->handle = handle;
return 1;
}
}
return AE_OK;
}
/*
* sn_acpi_get_pcidev_info - Search ACPI namespace for the acpi
* device matching the specified pci_dev,
* and return the pcidev info and irq info.
*/
int
sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info,
struct sn_irq_info **sn_irq_info)
{
unsigned int host_devfn;
struct sn_pcidev_match pcidev_match;
acpi_handle rootbus_handle;
unsigned long segment;
acpi_status status;
rootbus_handle = PCI_CONTROLLER(dev)->acpi_handle;
status = acpi_evaluate_integer(rootbus_handle, METHOD_NAME__SEG, NULL,
&segment);
if (ACPI_SUCCESS(status)) {
if (segment != pci_domain_nr(dev)) {
printk(KERN_ERR
"%s: Segment number mismatch, 0x%lx vs 0x%x for: ",
__FUNCTION__, segment, pci_domain_nr(dev));
acpi_ns_print_node_pathname(rootbus_handle, NULL);
printk("\n");
return 1;
}
} else {
printk(KERN_ERR "%s: Unable to get __SEG from: ",
__FUNCTION__);
acpi_ns_print_node_pathname(rootbus_handle, NULL);
printk("\n");
return 1;
}
/*
* We want to search all devices in this segment/domain
* of the ACPI namespace for the matching ACPI device,
* which holds the pcidev_info pointer in its vendor resource.
*/
pcidev_match.bus = dev->bus->number;
pcidev_match.devfn = dev->devfn;
pcidev_match.handle = NULL;
acpi_walk_namespace(ACPI_TYPE_DEVICE, rootbus_handle, ACPI_UINT32_MAX,
find_matching_device, &pcidev_match, NULL);
if (!pcidev_match.handle) {
printk(KERN_ERR
"%s: Could not find matching ACPI device for %s.\n",
__FUNCTION__, pci_name(dev));
return 1;
}
if (sn_extract_device_info(pcidev_match.handle, pcidev_info, sn_irq_info))
return 1;
/* Build up the pcidev_info.pdi_slot_host_handle */
host_devfn = get_host_devfn(pcidev_match.handle, rootbus_handle);
(*pcidev_info)->pdi_slot_host_handle =
((unsigned long) pci_domain_nr(dev) << 40) |
/* bus == 0 */
host_devfn;
return 0;
}
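The pdi_slot_host_handle built just above packs the PCI domain into the bits from bit 40 upward and the host devfn into the low bits, with the bus number implicitly zero. For example (illustrative values), domain 2 and host devfn 0x19 give (2UL << 40) | 0x19 = 0x20000000019.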
/*
* sn_acpi_slot_fixup - Obtain the pcidev_info and sn_irq_info.
* Perform any SN specific slot fixup.
* At present there does not appear to be
* any generic way to handle a ROM image
* that has been shadowed by the PROM, so
@ -179,11 +408,18 @@ sn_acpi_bus_fixup(struct pci_bus *bus)
*/
void
sn_acpi_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
sn_acpi_slot_fixup(struct pci_dev *dev)
{
void __iomem *addr;
struct pcidev_info *pcidev_info = NULL;
struct sn_irq_info *sn_irq_info = NULL;
size_t size;
if (sn_acpi_get_pcidev_info(dev, &pcidev_info, &sn_irq_info)) {
panic("%s: Failure obtaining pcidev_info for %s\n",
__FUNCTION__, pci_name(dev));
}
if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) {
/*
* A valid ROM image exists and has been shadowed by the
@ -200,8 +436,11 @@ sn_acpi_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
(unsigned long) addr + size;
dev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_BIOS_COPY;
}
sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
}
EXPORT_SYMBOL(sn_acpi_slot_fixup);
static struct acpi_driver acpi_sn_hubdev_driver = {
.name = "SGI HUBDEV Driver",
.ids = "SGIHUB,SGITIO",
@ -211,6 +450,33 @@ static struct acpi_driver acpi_sn_hubdev_driver = {
};
/*
* sn_acpi_bus_fixup - Perform SN specific setup of software structs
* (pcibus_bussoft, pcidev_info) and hardware
* registers, for the specified bus and devices under it.
*/
void
sn_acpi_bus_fixup(struct pci_bus *bus)
{
struct pci_dev *pci_dev = NULL;
struct pcibus_bussoft *prom_bussoft_ptr;
if (!bus->parent) { /* If root bus */
prom_bussoft_ptr = sn_get_bussoft_ptr(bus);
if (prom_bussoft_ptr == NULL) {
printk(KERN_ERR
"%s: 0x%04x:0x%02x Unable to "
"obtain prom_bussoft_ptr\n",
__FUNCTION__, pci_domain_nr(bus), bus->number);
return;
}
sn_common_bus_fixup(bus, prom_bussoft_ptr);
}
list_for_each_entry(pci_dev, &bus->devices, bus_list) {
sn_acpi_slot_fixup(pci_dev);
}
}
/*
* sn_io_acpi_init - PROM has ACPI support for IO, defining at a minimum the
* nodes and root buses in the DSDT. As a result, bus scanning

View File

@ -26,14 +26,10 @@
#include <linux/acpi.h>
#include <asm/sn/sn2/sn_hwperf.h>
#include <asm/sn/acpi.h>
#include "acpi/acglobal.h"
extern void sn_init_cpei_timer(void);
extern void register_sn_procfs(void);
extern void sn_acpi_bus_fixup(struct pci_bus *);
extern void sn_bus_fixup(struct pci_bus *);
extern void sn_acpi_slot_fixup(struct pci_dev *, struct pcidev_info *);
extern void sn_more_slot_fixup(struct pci_dev *, struct pcidev_info *);
extern void sn_legacy_pci_window_fixup(struct pci_controller *, u64, u64);
extern void sn_io_acpi_init(void);
extern void sn_io_init(void);
@ -48,6 +44,9 @@ struct sysdata_el {
int sn_ioif_inited; /* SN I/O infrastructure initialized? */
int sn_acpi_rev; /* SN ACPI revision */
EXPORT_SYMBOL_GPL(sn_acpi_rev);
struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
/*
@ -98,25 +97,6 @@ sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
return ret_stuff.status;
}
/*
* Retrieve the pci device information given the bus and device|function number.
*/
static inline u64
sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
u64 sn_irq_info)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
(u64) segment, (u64) bus_number, (u64) devfn,
(u64) pci_dev,
sn_irq_info, 0, 0);
return ret_stuff.v0;
}
/*
* sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
* device.
@ -249,47 +229,22 @@ void sn_pci_unfixup_slot(struct pci_dev *dev)
}
/*
* sn_pci_fixup_slot() - This routine sets up a slot's resources consistent
* with the Linux PCI abstraction layer. Resources
* acquired from our PCI provider include PIO maps
* to BAR space and interrupt objects.
* sn_pci_fixup_slot()
*/
void sn_pci_fixup_slot(struct pci_dev *dev)
void sn_pci_fixup_slot(struct pci_dev *dev, struct pcidev_info *pcidev_info,
struct sn_irq_info *sn_irq_info)
{
int segment = pci_domain_nr(dev->bus);
int status = 0;
struct pcibus_bussoft *bs;
struct pci_bus *host_pci_bus;
struct pci_dev *host_pci_dev;
struct pcidev_info *pcidev_info;
struct sn_irq_info *sn_irq_info;
unsigned int bus_no, devfn;
pci_dev_get(dev); /* for the sysdata pointer */
pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
if (!pcidev_info)
BUG(); /* Cannot afford to run out of memory */
sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
if (!sn_irq_info)
BUG(); /* Cannot afford to run out of memory */
/* Call to retrieve pci device information needed by kernel. */
status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
dev->devfn,
(u64) __pa(pcidev_info),
(u64) __pa(sn_irq_info));
if (status)
BUG(); /* Cannot get platform pci device information */
/* Add pcidev_info to list in pci_controller.platform_data */
list_add_tail(&pcidev_info->pdi_list,
&(SN_PLATFORM_DATA(dev->bus)->pcidev_info));
if (SN_ACPI_BASE_SUPPORT())
sn_acpi_slot_fixup(dev, pcidev_info);
else
sn_more_slot_fixup(dev, pcidev_info);
/*
* Using the PROMs values for the PCI host bus, get the Linux
* PCI host_pci_dev struct and set up host bus linkages
@ -489,11 +444,6 @@ void sn_generate_path(struct pci_bus *pci_bus, char *address)
sprintf(address, "%s^%d", address, geo_slot(geoid));
}
/*
* sn_pci_fixup_bus() - Perform SN specific setup of software structs
* (pcibus_bussoft, pcidev_info) and hardware
* registers, for the specified bus and devices under it.
*/
void __devinit
sn_pci_fixup_bus(struct pci_bus *bus)
{
@ -519,6 +469,15 @@ sn_io_early_init(void)
if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
return 0;
/* we set the acpi revision to that of the DSDT table OEM rev. */
{
struct acpi_table_header *header = NULL;
acpi_get_table_by_index(ACPI_TABLE_INDEX_DSDT, &header);
BUG_ON(header == NULL);
sn_acpi_rev = header->oem_revision;
}
/*
* prime sn_pci_provider[]. Individual provider init routines will
* override their respective default entries.
@ -544,8 +503,12 @@ sn_io_early_init(void)
register_sn_procfs();
#endif
{
struct acpi_table_header *header;
(void)acpi_get_table_by_index(ACPI_TABLE_INDEX_DSDT, &header);
printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n",
acpi_gbl_DSDT->oem_revision);
header->oem_revision);
}
if (SN_ACPI_BASE_SUPPORT())
sn_io_acpi_init();
else
@ -605,7 +568,6 @@ sn_io_late_init(void)
fs_initcall(sn_io_late_init);
EXPORT_SYMBOL(sn_pci_fixup_slot);
EXPORT_SYMBOL(sn_pci_unfixup_slot);
EXPORT_SYMBOL(sn_bus_store_sysdata);
EXPORT_SYMBOL(sn_bus_free_sysdata);

View File

@ -56,6 +56,25 @@ static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
return ret_stuff.v0;
}
/*
* Retrieve the pci device information given the bus and device|function number.
*/
static inline u64
sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
u64 sn_irq_info)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
(u64) segment, (u64) bus_number, (u64) devfn,
(u64) pci_dev,
sn_irq_info, 0, 0);
return ret_stuff.v0;
}
/*
* sn_fixup_ionodes() - This routine initializes the HUB data structure for
@ -172,18 +191,40 @@ sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
}
/*
* sn_more_slot_fixup() - We are not running with an ACPI capable PROM,
* sn_io_slot_fixup() - We are not running with an ACPI capable PROM,
* and need to convert the pci_dev->resource
* 'start' and 'end' addresses to mapped addresses,
* and setup the pci_controller->window array entries.
*/
void
sn_more_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
sn_io_slot_fixup(struct pci_dev *dev)
{
unsigned int count = 0;
int idx;
s64 pci_addrs[PCI_ROM_RESOURCE + 1];
unsigned long addr, end, size, start;
struct pcidev_info *pcidev_info;
struct sn_irq_info *sn_irq_info;
int status;
pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
if (!pcidev_info)
panic("%s: Unable to alloc memory for pcidev_info", __FUNCTION__);
sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
if (!sn_irq_info)
panic("%s: Unable to alloc memory for sn_irq_info", __FUNCTION__);
/* Call to retrieve pci device information needed by kernel. */
status = sal_get_pcidev_info((u64) pci_domain_nr(dev),
(u64) dev->bus->number,
dev->devfn,
(u64) __pa(pcidev_info),
(u64) __pa(sn_irq_info));
if (status)
BUG(); /* Cannot get platform pci device information */
/* Copy over PIO Mapped Addresses */
for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
@ -219,8 +260,12 @@ sn_more_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
*/
if (count > 0)
sn_pci_window_fixup(dev, count, pci_addrs);
sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
}
EXPORT_SYMBOL(sn_io_slot_fixup);
/*
* sn_pci_controller_fixup() - This routine sets up a bus's resources
* consistent with the Linux PCI abstraction layer.
@ -272,9 +317,6 @@ sn_bus_fixup(struct pci_bus *bus)
{
struct pci_dev *pci_dev = NULL;
struct pcibus_bussoft *prom_bussoft_ptr;
extern void sn_common_bus_fixup(struct pci_bus *,
struct pcibus_bussoft *);
if (!bus->parent) { /* If root bus */
prom_bussoft_ptr = PCI_CONTROLLER(bus)->platform_data;
@ -291,7 +333,7 @@ sn_bus_fixup(struct pci_bus *bus)
prom_bussoft_ptr->bs_legacy_mem);
}
list_for_each_entry(pci_dev, &bus->devices, bus_list) {
sn_pci_fixup_slot(pci_dev);
sn_io_slot_fixup(pci_dev);
}
}

View File

@ -29,6 +29,7 @@
* on IA64. This routine will convert a port number into a valid
* SN i/o address. Used by sn_in*() and sn_out*().
*/
void *sn_io_addr(unsigned long port)
{
if (!IS_RUNNING_ON_SIMULATOR()) {

View File

@ -59,13 +59,12 @@ void sn_teardown_msi_irq(unsigned int irq)
sn_intr_free(nasid, widget, sn_irq_info);
sn_msi_info[irq].sn_irq_info = NULL;
return;
destroy_irq(irq);
}
int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
{
struct msi_msg msg;
struct msi_desc *entry;
int widget;
int status;
nasid_t nasid;
@ -73,8 +72,8 @@ int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
struct sn_irq_info *sn_irq_info;
struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
int irq;
entry = get_irq_data(irq);
if (!entry->msi_attrib.is_64)
return -EINVAL;
@ -84,6 +83,11 @@ int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
if (provider == NULL || provider->dma_map_consistent == NULL)
return -EINVAL;
irq = create_irq();
if (irq < 0)
return irq;
set_irq_msi(irq, entry);
/*
* Set up the vector plumbing. Let the prom (via sn_intr_alloc)
* decide which cpu to direct this msi at by default.
@ -95,12 +99,15 @@ int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
SWIN_WIDGETNUM(bussoft->bs_base);
sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
if (! sn_irq_info)
if (! sn_irq_info) {
destroy_irq(irq);
return -ENOMEM;
}
status = sn_intr_alloc(nasid, widget, sn_irq_info, irq, -1, -1);
if (status) {
kfree(sn_irq_info);
destroy_irq(irq);
return -ENOMEM;
}
@ -121,6 +128,7 @@ int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
if (! bus_addr) {
sn_intr_free(nasid, widget, sn_irq_info);
kfree(sn_irq_info);
destroy_irq(irq);
return -ENOMEM;
}
@ -139,7 +147,7 @@ int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
write_msi_msg(irq, &msg);
set_irq_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);
return 0;
return irq;
}
#ifdef CONFIG_SMP

View File

@ -20,7 +20,8 @@
#include "xtalk/hubdev.h"
int
sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp)
sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp,
char **ssdt)
{
struct ia64_sal_retval ret_stuff;
u64 busnum;
@ -32,7 +33,8 @@ sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp)
segment = soft->pbi_buscommon.bs_persist_segment;
busnum = soft->pbi_buscommon.bs_persist_busnum;
SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment,
busnum, (u64) device, (u64) resp, 0, 0, 0);
busnum, (u64) device, (u64) resp, (u64)ia64_tpa(ssdt),
0, 0);
return (int)ret_stuff.v0;
}

View File

@ -575,6 +575,7 @@ config SGI_IP27
select DMA_IP27
select EARLY_PRINTK
select HW_HAS_PCI
select NR_CPUS_DEFAULT_64
select PCI_DOMAINS
select SYS_HAS_CPU_R10000
select SYS_SUPPORTS_64BIT_KERNEL
@ -612,6 +613,7 @@ config SIBYTE_BIGSUR
bool "Sibyte BCM91480B-BigSur"
select BOOT_ELF32
select DMA_COHERENT
select NR_CPUS_DEFAULT_4
select PCI_DOMAINS
select SIBYTE_BCM1x80
select SWAP_IO_SPACE
@ -623,6 +625,7 @@ config SIBYTE_SWARM
bool "Sibyte BCM91250A-SWARM"
select BOOT_ELF32
select DMA_COHERENT
select NR_CPUS_DEFAULT_2
select SIBYTE_SB1250
select SWAP_IO_SPACE
select SYS_HAS_CPU_SB1
@ -635,6 +638,7 @@ config SIBYTE_SENTOSA
depends on EXPERIMENTAL
select BOOT_ELF32
select DMA_COHERENT
select NR_CPUS_DEFAULT_2
select SIBYTE_SB1250
select SWAP_IO_SPACE
select SYS_HAS_CPU_SB1
@ -668,6 +672,7 @@ config SIBYTE_PTSWARM
depends on EXPERIMENTAL
select BOOT_ELF32
select DMA_COHERENT
select NR_CPUS_DEFAULT_2
select SIBYTE_SB1250
select SWAP_IO_SPACE
select SYS_HAS_CPU_SB1
@ -680,6 +685,7 @@ config SIBYTE_LITTLESUR
depends on EXPERIMENTAL
select BOOT_ELF32
select DMA_COHERENT
select NR_CPUS_DEFAULT_2
select SIBYTE_SB1250
select SWAP_IO_SPACE
select SYS_HAS_CPU_SB1
@ -790,23 +796,6 @@ config TOSHIBA_RBTX4938
endchoice
config KEXEC
bool "Kexec system call (EXPERIMENTAL)"
depends on EXPERIMENTAL
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
but it is independent of the system firmware. And like a reboot
you can start any kernel with it, not just Linux.
The name comes from the similarity to the exec system call.
It is an ongoing process to be certain the hardware in a machine
is properly shutdown, so do not be surprised if this code does not
initially work for you. It may help to enable device hotplugging
support. As of this writing the exact hardware interface is
strongly in flux, so no good recommendation can be made.
source "arch/mips/ddb5xxx/Kconfig"
source "arch/mips/gt64120/ev64120/Kconfig"
source "arch/mips/jazz/Kconfig"
@ -1541,6 +1530,8 @@ config MIPS_MT_SMTC
select CPU_MIPSR2_IRQ_VI
select CPU_MIPSR2_SRS
select MIPS_MT
select NR_CPUS_DEFAULT_2
select NR_CPUS_DEFAULT_8
select SMP
select SYS_SUPPORTS_SMP
help
@ -1756,13 +1747,34 @@ config SMP
config SYS_SUPPORTS_SMP
bool
config NR_CPUS_DEFAULT_2
bool
config NR_CPUS_DEFAULT_4
bool
config NR_CPUS_DEFAULT_8
bool
config NR_CPUS_DEFAULT_16
bool
config NR_CPUS_DEFAULT_32
bool
config NR_CPUS_DEFAULT_64
bool
config NR_CPUS
int "Maximum number of CPUs (2-64)"
range 2 64
depends on SMP
default "64" if SGI_IP27
default "2"
default "8" if MIPS_MT_SMTC
default "2" if NR_CPUS_DEFAULT_2
default "4" if NR_CPUS_DEFAULT_4
default "8" if NR_CPUS_DEFAULT_8
default "16" if NR_CPUS_DEFAULT_16
default "32" if NR_CPUS_DEFAULT_32
default "64" if NR_CPUS_DEFAULT_64
help
This allows you to specify the maximum number of CPUs which this
kernel will support. The maximum supported value is 32 for 32-bit
@ -1859,6 +1871,40 @@ config MIPS_INSANE_LARGE
This will result in additional memory usage, so it is not
recommended for normal users.
config KEXEC
bool "Kexec system call (EXPERIMENTAL)"
depends on EXPERIMENTAL
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
but it is independent of the system firmware. And like a reboot
you can start any kernel with it, not just Linux.
The name comes from the similarity to the exec system call.
It is an ongoing process to be certain the hardware in a machine
is properly shutdown, so do not be surprised if this code does not
initially work for you. It may help to enable device hotplugging
support. As of this writing the exact hardware interface is
strongly in flux, so no good recommendation can be made.
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
depends on PROC_FS && BROKEN
default y
help
This kernel feature is useful for number crunching applications
that may need to compute untrusted bytecode during their
execution. By using pipes or other transports made available to
the process as file descriptors supporting the read/write
syscalls, it's possible to isolate those applications in
their own address space using seccomp. Once seccomp is
enabled via /proc/<pid>/seccomp, it cannot be disabled
and the task is only allowed to execute a few safe syscalls
defined by each seccomp mode.
If unsure, say Y. Only embedded should say N here.
endmenu
config RWSEM_GENERIC_SPINLOCK
@ -2025,23 +2071,6 @@ config BINFMT_ELF32
bool
default y if MIPS32_O32 || MIPS32_N32
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
depends on PROC_FS && BROKEN
default y
help
This kernel feature is useful for number crunching applications
that may need to compute untrusted bytecode during their
execution. By using pipes or other transports made available to
the process as file descriptors supporting the read/write
syscalls, it's possible to isolate those applications in
their own address space using seccomp. Once seccomp is
enabled via /proc/<pid>/seccomp, it cannot be disabled
and the task is only allowed to execute a few safe syscalls
defined by each seccomp mode.
If unsure, say Y. Only embedded should say N here.
config PM
bool "Power Management support (EXPERIMENTAL)"
depends on EXPERIMENTAL && SOC_AU1X00

View File

@ -77,7 +77,7 @@ static struct smatch * __init string_to_mach(const char *s)
{
int i;
for (i = 0; i < (sizeof(mach_table) / sizeof (mach_table[0])); i++) {
for (i = 0; i < ARRAY_SIZE(mach_table); i++) {
if (!strcmp(s, mach_table[i].arcname))
return &mach_table[i];
}

View File

@ -141,30 +141,20 @@ void __init prom_meminit(void)
}
}
unsigned long __init prom_free_prom_memory(void)
void __init prom_free_prom_memory(void)
{
unsigned long freed = 0;
unsigned long addr;
int i;
if (prom_flags & PROM_FLAG_DONT_FREE_TEMP)
return 0;
return;
for (i = 0; i < boot_mem_map.nr_map; i++) {
if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
continue;
addr = boot_mem_map.map[i].addr;
while (addr < boot_mem_map.map[i].addr
+ boot_mem_map.map[i].size) {
ClearPageReserved(virt_to_page(__va(addr)));
init_page_count(virt_to_page(__va(addr)));
free_page((unsigned long)__va(addr));
addr += PAGE_SIZE;
freed += PAGE_SIZE;
free_init_pages("prom memory",
addr, addr + boot_mem_map.map[i].size);
}
}
printk(KERN_INFO "Freeing prom memory: %ldkb freed\n", freed >> 10);
return freed;
}

View File

@ -233,7 +233,7 @@ void restore_local_and_enable(int controller, unsigned long mask)
static struct irq_chip rise_edge_irq_type = {
.typename = "Au1000 Rise Edge",
.name = "Au1000 Rise Edge",
.ack = mask_and_ack_rise_edge_irq,
.mask = local_disable_irq,
.mask_ack = mask_and_ack_rise_edge_irq,
@ -242,7 +242,7 @@ static struct irq_chip rise_edge_irq_type = {
};
static struct irq_chip fall_edge_irq_type = {
.typename = "Au1000 Fall Edge",
.name = "Au1000 Fall Edge",
.ack = mask_and_ack_fall_edge_irq,
.mask = local_disable_irq,
.mask_ack = mask_and_ack_fall_edge_irq,
@ -251,7 +251,7 @@ static struct irq_chip fall_edge_irq_type = {
};
static struct irq_chip either_edge_irq_type = {
.typename = "Au1000 Rise or Fall Edge",
.name = "Au1000 Rise or Fall Edge",
.ack = mask_and_ack_either_edge_irq,
.mask = local_disable_irq,
.mask_ack = mask_and_ack_either_edge_irq,
@ -260,7 +260,7 @@ static struct irq_chip either_edge_irq_type = {
};
static struct irq_chip level_irq_type = {
.typename = "Au1000 Level",
.name = "Au1000 Level",
.ack = mask_and_ack_level_irq,
.mask = local_disable_irq,
.mask_ack = mask_and_ack_level_irq,

View File

@ -76,14 +76,18 @@ static int __init au1x_pci_setup(void)
}
#ifdef CONFIG_DMA_NONCOHERENT
{
/*
* Set the NC bit in controller for Au1500 pre-AC silicon
*/
u32 prid = read_c0_prid();
if ( (prid & 0xFF000000) == 0x01000000 && prid < 0x01030202) {
au_writel( 1<<16 | au_readl(Au1500_PCI_CFG), Au1500_PCI_CFG);
if ((prid & 0xFF000000) == 0x01000000 && prid < 0x01030202) {
au_writel((1 << 16) | au_readl(Au1500_PCI_CFG),
Au1500_PCI_CFG);
printk("Non-coherent PCI accesses enabled\n");
}
}
#endif
set_io_port_base(virt_io_addr);

View File

@ -149,9 +149,8 @@ int get_ethernet_addr(char *ethernet_addr)
return 0;
}
unsigned long __init prom_free_prom_memory(void)
void __init prom_free_prom_memory(void)
{
return 0;
}
EXPORT_SYMBOL(prom_getcmdline);

View File

@ -141,17 +141,20 @@ void __init plat_mem_setup(void)
/* This routine should be valid for all Au1x based boards */
phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size)
{
u32 start, end;
/* Don't fixup 36 bit addresses */
if ((phys_addr >> 32) != 0) return phys_addr;
if ((phys_addr >> 32) != 0)
return phys_addr;
#ifdef CONFIG_PCI
{
u32 start, end;
start = (u32)Au1500_PCI_MEM_START;
end = (u32)Au1500_PCI_MEM_END;
/* check for pci memory window */
if ((phys_addr >= start) && ((phys_addr + size) < end)) {
return (phys_t)((phys_addr - start) + Au1500_PCI_MEM_START);
if ((phys_addr >= start) && ((phys_addr + size) < end))
return (phys_t)
((phys_addr - start) + Au1500_PCI_MEM_START);
}
#endif

View File

@ -47,8 +47,7 @@ void board_reset (void)
void __init board_setup(void)
{
u32 pin_func;
u32 sys_freqctrl, sys_clksrc;
volatile void __iomem * base = (volatile void __iomem *) 0xac000000UL;
// set AUX clock to 12MHz * 8 = 96 MHz
au_writel(8, SYS_AUXPLL);
@ -56,6 +55,9 @@ void __init board_setup(void)
udelay(100);
#ifdef CONFIG_USB_OHCI
{
u32 pin_func, sys_freqctrl, sys_clksrc;
// configure pins GPIO[14:9] as GPIO
pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x80);
@ -95,19 +97,20 @@ void __init board_setup(void)
// 2nd USB port is USB host
pin_func |= 0x8000;
au_writel(pin_func, SYS_PINFUNC);
}
#endif // defined (CONFIG_USB_OHCI)
/* Enable sys bus clock divider when IDLE state or no bus activity. */
au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL);
// Enable the RTC if not already enabled
if (!(readb(0xac000028) & 0x20)) {
writeb(readb(0xac000028) | 0x20, 0xac000028);
if (!(readb(base + 0x28) & 0x20)) {
writeb(readb(base + 0x28) | 0x20, base + 0x28);
au_sync();
}
// Put the clock in BCD mode
if (readb(0xac00002C) & 0x4) { /* reg B */
writeb(readb(0xac00002c) & ~0x4, 0xac00002c);
if (readb(base + 0x2C) & 0x4) { /* reg B */
writeb(readb(base + 0x2c) & ~0x4, base + 0x2c);
au_sync();
}
}

View File

@ -137,33 +137,20 @@ static void pb1200_shutdown_irq( unsigned int irq_nr )
return;
}
static inline void pb1200_mask_and_ack_irq(unsigned int irq_nr)
{
pb1200_disable_irq( irq_nr );
}
static void pb1200_end_irq(unsigned int irq_nr)
{
if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))) {
pb1200_enable_irq(irq_nr);
}
}
static struct irq_chip external_irq_type =
{
#ifdef CONFIG_MIPS_PB1200
"Pb1200 Ext",
.name = "Pb1200 Ext",
#endif
#ifdef CONFIG_MIPS_DB1200
"Db1200 Ext",
.name = "Db1200 Ext",
#endif
pb1200_startup_irq,
pb1200_shutdown_irq,
pb1200_enable_irq,
pb1200_disable_irq,
pb1200_mask_and_ack_irq,
pb1200_end_irq,
NULL
.startup = pb1200_startup_irq,
.shutdown = pb1200_shutdown_irq,
.ack = pb1200_disable_irq,
.mask = pb1200_disable_irq,
.mask_ack = pb1200_disable_irq,
.unmask = pb1200_enable_irq,
};
void _board_init_irq(void)
@ -172,7 +159,8 @@ void _board_init_irq(void)
for (irq_nr = PB1200_INT_BEGIN; irq_nr <= PB1200_INT_END; irq_nr++)
{
irq_desc[irq_nr].chip = &external_irq_type;
set_irq_chip_and_handler(irq_nr, &external_irq_type,
handle_level_irq);
pb1200_disable_irq(irq_nr);
}

View File

@ -47,9 +47,9 @@ extern asmlinkage void excite_handle_int(void);
*/
void __init arch_init_irq(void)
{
mips_cpu_irq_init(0);
rm7k_cpu_irq_init(8);
rm9k_cpu_irq_init(12);
mips_cpu_irq_init();
rm7k_cpu_irq_init();
rm9k_cpu_irq_init();
#ifdef CONFIG_KGDB
excite_kgdb_init();

View File

@ -104,7 +104,7 @@ void __init arch_init_irq(void)
GT_WRITE(GT_INTRMASK_OFS, 0);
init_i8259_irqs(); /* 0 ... 15 */
mips_cpu_irq_init(COBALT_CPU_IRQ); /* 16 ... 23 */
mips_cpu_irq_init(); /* 16 ... 23 */
/*
* Mask all cpu interrupts

View File

@ -204,8 +204,7 @@ void __init prom_init(void)
add_memory_region(0x0, memsz, BOOT_MEM_RAM);
}
unsigned long __init prom_free_prom_memory(void)
void __init prom_free_prom_memory(void)
{
/* Nothing to do! */
return 0;
}

View File

@ -59,9 +59,8 @@ void __init prom_init(void)
#endif
}
unsigned long __init prom_free_prom_memory(void)
void __init prom_free_prom_memory(void)
{
return 0;
}
#if defined(CONFIG_DDB5477)

View File

@ -17,6 +17,7 @@
#include <linux/ptrace.h>
#include <asm/i8259.h>
#include <asm/irq_cpu.h>
#include <asm/system.h>
#include <asm/mipsregs.h>
#include <asm/debug.h>
@ -73,7 +74,6 @@ set_pci_int_attr(u32 pci, u32 intn, u32 active, u32 trigger)
}
extern void vrc5477_irq_init(u32 base);
extern void mips_cpu_irq_init(u32 base);
static struct irqaction irq_cascade = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL };
void __init arch_init_irq(void)
@ -125,7 +125,7 @@ void __init arch_init_irq(void)
/* init all controllers */
init_i8259_irqs();
mips_cpu_irq_init(CPU_IRQ_BASE);
mips_cpu_irq_init();
vrc5477_irq_init(VRC5477_IRQ_BASE);
@ -146,8 +146,7 @@ u8 i8259_interrupt_ack(void)
irq = *(volatile u8 *) KSEG1ADDR(DDB_PCI_IACK_BASE);
ddb_out32(DDB_PCIINIT10, reg);
/* i8259.c set the base vector to be 0x0 */
return irq + I8259_IRQ_BASE;
return irq;
}
/*
* the first level int-handler will jump here if it is a vrc5477 irq
@ -177,7 +176,7 @@ static void vrc5477_irq_dispatch(void)
/* check for i8259 interrupts */
if (intStatus & (1 << VRC5477_I8259_CASCADE)) {
int i8259_irq = i8259_interrupt_ack();
do_IRQ(I8259_IRQ_BASE + i8259_irq);
do_IRQ(i8259_irq);
return;
}
}

View File

@ -82,7 +82,7 @@ vrc5477_irq_end(unsigned int irq)
}
struct irq_chip vrc5477_irq_controller = {
.typename = "vrc5477_irq",
.name = "vrc5477_irq",
.ack = vrc5477_irq_ack,
.mask = vrc5477_irq_disable,
.mask_ack = vrc5477_irq_ack,

View File

@ -62,7 +62,7 @@ static inline void end_ioasic_irq(unsigned int irq)
}
static struct irq_chip ioasic_irq_type = {
.typename = "IO-ASIC",
.name = "IO-ASIC",
.ack = ack_ioasic_irq,
.mask = mask_ioasic_irq,
.mask_ack = ack_ioasic_irq,
@ -84,7 +84,7 @@ static inline void end_ioasic_dma_irq(unsigned int irq)
}
static struct irq_chip ioasic_dma_irq_type = {
.typename = "IO-ASIC-DMA",
.name = "IO-ASIC-DMA",
.ack = ack_ioasic_dma_irq,
.mask = mask_ioasic_dma_irq,
.mask_ack = ack_ioasic_dma_irq,

View File

@ -58,7 +58,7 @@ static void ack_kn02_irq(unsigned int irq)
}
static struct irq_chip kn02_irq_type = {
.typename = "KN02-CSR",
.name = "KN02-CSR",
.ack = ack_kn02_irq,
.mask = mask_kn02_irq,
.mask_ack = ack_kn02_irq,

View File

@ -92,9 +92,9 @@ void __init prom_meminit(u32 magic)
rex_setup_memory_region();
}
unsigned long __init prom_free_prom_memory(void)
void __init prom_free_prom_memory(void)
{
unsigned long addr, end;
unsigned long end;
/*
* Free everything below the kernel itself but leave
@ -114,16 +114,5 @@ unsigned long __init prom_free_prom_memory(void)
#endif
end = __pa(&_text);
addr = PAGE_SIZE;
while (addr < end) {
ClearPageReserved(virt_to_page(__va(addr)));
init_page_count(virt_to_page(__va(addr)));
free_page((unsigned long)__va(addr));
addr += PAGE_SIZE;
}
printk("Freeing unused PROM memory: %ldkb freed\n",
(end - PAGE_SIZE) >> 10);
return end - PAGE_SIZE;
free_init_pages("unused PROM memory", PAGE_SIZE, end);
}
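
The DECstation hunk above replaces an open-coded ClearPageReserved / init_page_count / free_page loop and its printk with a single call to the MIPS helper free_init_pages(). A hedged sketch of what such a helper does, not the kernel's exact implementation:

/* Illustrative only: hand the pages covering [begin_pa, end_pa) back to
 * the page allocator and report how much was freed. */
static void free_init_pages_sketch(const char *what,
                                   unsigned long begin_pa, unsigned long end_pa)
{
        unsigned long pfn;

        for (pfn = PFN_UP(begin_pa); pfn < PFN_DOWN(end_pa); pfn++) {
                struct page *page = pfn_to_page(pfn);

                ClearPageReserved(page);
                init_page_count(page);
                __free_page(page);
        }
        printk(KERN_INFO "Freeing %s: %ldk freed\n",
               what, (end_pa - begin_pa) >> 10);
}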

View File

@ -234,7 +234,7 @@ static void __init dec_init_kn01(void)
memcpy(&cpu_mask_nr_tbl, &kn01_cpu_mask_nr_tbl,
sizeof(kn01_cpu_mask_nr_tbl));
mips_cpu_irq_init(DEC_CPU_IRQ_BASE);
mips_cpu_irq_init();
} /* dec_init_kn01 */
@ -309,7 +309,7 @@ static void __init dec_init_kn230(void)
memcpy(&cpu_mask_nr_tbl, &kn230_cpu_mask_nr_tbl,
sizeof(kn230_cpu_mask_nr_tbl));
mips_cpu_irq_init(DEC_CPU_IRQ_BASE);
mips_cpu_irq_init();
} /* dec_init_kn230 */
@ -403,7 +403,7 @@ static void __init dec_init_kn02(void)
memcpy(&asic_mask_nr_tbl, &kn02_asic_mask_nr_tbl,
sizeof(kn02_asic_mask_nr_tbl));
mips_cpu_irq_init(DEC_CPU_IRQ_BASE);
mips_cpu_irq_init();
init_kn02_irqs(KN02_IRQ_BASE);
} /* dec_init_kn02 */
@ -504,7 +504,7 @@ static void __init dec_init_kn02ba(void)
memcpy(&asic_mask_nr_tbl, &kn02ba_asic_mask_nr_tbl,
sizeof(kn02ba_asic_mask_nr_tbl));
mips_cpu_irq_init(DEC_CPU_IRQ_BASE);
mips_cpu_irq_init();
init_ioasic_irqs(IO_IRQ_BASE);
} /* dec_init_kn02ba */
@ -601,7 +601,7 @@ static void __init dec_init_kn02ca(void)
memcpy(&asic_mask_nr_tbl, &kn02ca_asic_mask_nr_tbl,
sizeof(kn02ca_asic_mask_nr_tbl));
mips_cpu_irq_init(DEC_CPU_IRQ_BASE);
mips_cpu_irq_init();
init_ioasic_irqs(IO_IRQ_BASE);
} /* dec_init_kn02ca */
@ -702,7 +702,7 @@ static void __init dec_init_kn03(void)
memcpy(&asic_mask_nr_tbl, &kn03_asic_mask_nr_tbl,
sizeof(kn03_asic_mask_nr_tbl));
mips_cpu_irq_init(DEC_CPU_IRQ_BASE);
mips_cpu_irq_init();
init_ioasic_irqs(IO_IRQ_BASE);
} /* dec_init_kn03 */

View File

@ -57,7 +57,7 @@ static void emma2rh_irq_disable(unsigned int irq)
}
struct irq_chip emma2rh_irq_controller = {
.typename = "emma2rh_irq",
.name = "emma2rh_irq",
.ack = emma2rh_irq_disable,
.mask = emma2rh_irq_disable,
.mask_ack = emma2rh_irq_disable,

View File

@ -106,7 +106,7 @@ void __init arch_init_irq(void)
emma2rh_irq_init(EMMA2RH_IRQ_BASE);
emma2rh_sw_irq_init(EMMA2RH_SW_IRQ_BASE);
emma2rh_gpio_irq_init(EMMA2RH_GPIO_IRQ_BASE);
mips_cpu_irq_init(CPU_IRQ_BASE);
mips_cpu_irq_init();
/* setup cascade interrupts */
setup_irq(EMMA2RH_IRQ_BASE + EMMA2RH_SW_CASCADE, &irq_cascade);

View File

@ -49,7 +49,7 @@ static void emma2rh_sw_irq_disable(unsigned int irq)
}
struct irq_chip emma2rh_sw_irq_controller = {
.typename = "emma2rh_sw_irq",
.name = "emma2rh_sw_irq",
.ack = emma2rh_sw_irq_disable,
.mask = emma2rh_sw_irq_disable,
.mask_ack = emma2rh_sw_irq_disable,
@ -115,7 +115,7 @@ static void emma2rh_gpio_irq_end(unsigned int irq)
}
struct irq_chip emma2rh_gpio_irq_controller = {
.typename = "emma2rh_gpio_irq",
.name = "emma2rh_gpio_irq",
.ack = emma2rh_gpio_irq_ack,
.mask = emma2rh_gpio_irq_disable,
.mask_ack = emma2rh_gpio_irq_ack,

View File

@ -88,7 +88,7 @@ static void end_ev64120_irq(unsigned int irq)
}
static struct irq_chip ev64120_irq_type = {
.typename = "EV64120",
.name = "EV64120",
.ack = disable_ev64120_irq,
.mask = disable_ev64120_irq,
.mask_ack = disable_ev64120_irq,

View File

@ -59,9 +59,8 @@ extern void galileo_machine_power_off(void);
*/
extern struct pci_ops galileo_pci_ops;
unsigned long __init prom_free_prom_memory(void)
void __init prom_free_prom_memory(void)
{
return 0;
}
/*

View File

@ -1,6 +1,4 @@
#ifdef CONFIG_KGDB
#include <asm/serial.h> /* For the serial port location and base baud */
/* --- CONFIG --- */
@ -121,5 +119,3 @@ int putDebugChar(uint8 byte)
UART16550_WRITE(OFS_SEND_BUFFER, byte);
return 1;
}
#endif

View File

@ -90,6 +90,6 @@ void __init arch_init_irq(void)
clear_c0_status(ST0_IM);
local_irq_disable();
mips_cpu_irq_init(0);
rm7k_cpu_irq_init(8);
mips_cpu_irq_init();
rm7k_cpu_irq_init();
}

View File

@ -67,7 +67,6 @@ void __init prom_init(void)
add_memory_region(0, 64 << 20, BOOT_MEM_RAM);
}
unsigned long __init prom_free_prom_memory(void)
void __init prom_free_prom_memory(void)
{
return 0;
}

View File

@ -63,7 +63,7 @@ void gt64120_init_pic(void)
void __init arch_init_irq(void)
{
/* IRQ 0 - 7 are for MIPS common irq_cpu controller */
mips_cpu_irq_init(0);
mips_cpu_irq_init();
gt64120_init_pic();
}

View File

@ -93,9 +93,8 @@ void __init wrppmc_early_printk(const char *fmt, ...)
}
#endif /* WRPPMC_EARLY_DEBUG */
unsigned long __init prom_free_prom_memory(void)
void __init prom_free_prom_memory(void)
{
return 0;
}
#ifdef CONFIG_SERIAL_8250

View File

@ -40,7 +40,7 @@ void disable_r4030_irq(unsigned int irq)
}
static struct irq_chip r4030_irq_type = {
.typename = "R4030",
.name = "R4030",
.ack = disable_r4030_irq,
.mask = disable_r4030_irq,
.mask_ack = disable_r4030_irq,

View File

@ -75,7 +75,6 @@ void __init prom_init_cmdline(void)
*cp = '\0';
}
unsigned long __init prom_free_prom_memory(void)
void __init prom_free_prom_memory(void)
{
return 0;
}

View File

@ -439,7 +439,7 @@ void __init arch_init_irq(void)
}
static struct irq_chip jmr3927_irq_controller = {
.typename = "jmr3927_irq",
.name = "jmr3927_irq",
.ack = jmr3927_irq_ack,
.mask = jmr3927_irq_disable,
.mask_ack = jmr3927_irq_ack,

View File

@ -434,7 +434,7 @@ void __init tx3927_setup(void)
/* DMA */
tx3927_dmaptr->mcr = 0;
for (i = 0; i < sizeof(tx3927_dmaptr->ch) / sizeof(tx3927_dmaptr->ch[0]); i++) {
for (i = 0; i < ARRAY_SIZE(tx3927_dmaptr->ch); i++) {
/* reset channel */
tx3927_dmaptr->ch[i].ccr = TX3927_DMA_CCR_CHRST;
tx3927_dmaptr->ch[i].ccr = 0;
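
The tx3927 hunk above swaps an open-coded sizeof division for the ARRAY_SIZE() macro. A small self-contained illustration of the two spellings (the array here is hypothetical; the kernel macro additionally rejects non-array arguments at compile time):

#include <stdio.h>

/* Same arithmetic as the kernel's ARRAY_SIZE(), minus the compile-time
 * check that the argument really is an array. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

int main(void)
{
        int ch[8];

        /* open-coded form the hunk removes */
        printf("%zu\n", sizeof(ch) / sizeof(ch[0]));    /* prints 8 */
        /* macro form the hunk adds */
        printf("%zu\n", ARRAY_SIZE(ch));                /* prints 8 */
        return 0;
}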

View File

@ -234,10 +234,6 @@ void output_mm_defines(void)
constant("#define _PMD_SHIFT ", PMD_SHIFT);
constant("#define _PGDIR_SHIFT ", PGDIR_SHIFT);
linefeed;
constant("#define _PGD_ORDER ", PGD_ORDER);
constant("#define _PMD_ORDER ", PMD_ORDER);
constant("#define _PTE_ORDER ", PTE_ORDER);
linefeed;
constant("#define _PTRS_PER_PGD ", PTRS_PER_PGD);
constant("#define _PTRS_PER_PMD ", PTRS_PER_PMD);
constant("#define _PTRS_PER_PTE ", PTRS_PER_PTE);

View File

@ -231,28 +231,3 @@ NESTED(smp_bootstrap, 16, sp)
#endif /* CONFIG_SMP */
__FINIT
.comm kernelsp, NR_CPUS * 8, 8
.comm pgd_current, NR_CPUS * 8, 8
.comm fw_arg0, SZREG, SZREG # firmware arguments
.comm fw_arg1, SZREG, SZREG
.comm fw_arg2, SZREG, SZREG
.comm fw_arg3, SZREG, SZREG
.macro page name, order
.comm \name, (_PAGE_SIZE << \order), (_PAGE_SIZE << \order)
.endm
/*
* On 64-bit we've got three-level pagetables with a slightly
* different layout ...
*/
page swapper_pg_dir, _PGD_ORDER
#ifdef CONFIG_64BIT
#if defined(CONFIG_MODULES) && !defined(CONFIG_BUILD_ELF64)
page module_pg_dir, _PGD_ORDER
#endif
page invalid_pmd_table, _PMD_ORDER
#endif
page invalid_pte_table, _PTE_ORDER

View File

@ -54,9 +54,11 @@ static unsigned int cached_irq_mask = 0xffff;
void disable_8259A_irq(unsigned int irq)
{
unsigned int mask = 1 << irq;
unsigned int mask;
unsigned long flags;
irq -= I8259A_IRQ_BASE;
mask = 1 << irq;
spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask |= mask;
if (irq & 8)
@ -68,9 +70,11 @@ void disable_8259A_irq(unsigned int irq)
void enable_8259A_irq(unsigned int irq)
{
unsigned int mask = ~(1 << irq);
unsigned int mask;
unsigned long flags;
irq -= I8259A_IRQ_BASE;
mask = ~(1 << irq);
spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask &= mask;
if (irq & 8)
@ -82,10 +86,12 @@ void enable_8259A_irq(unsigned int irq)
int i8259A_irq_pending(unsigned int irq)
{
unsigned int mask = 1 << irq;
unsigned int mask;
unsigned long flags;
int ret;
irq -= I8259A_IRQ_BASE;
mask = 1 << irq;
spin_lock_irqsave(&i8259A_lock, flags);
if (irq < 8)
ret = inb(PIC_MASTER_CMD) & mask;
@ -134,9 +140,11 @@ static inline int i8259A_irq_real(unsigned int irq)
*/
void mask_and_ack_8259A(unsigned int irq)
{
unsigned int irqmask = 1 << irq;
unsigned int irqmask;
unsigned long flags;
irq -= I8259A_IRQ_BASE;
irqmask = 1 << irq;
spin_lock_irqsave(&i8259A_lock, flags);
/*
* Lightweight spurious IRQ detection. We do not want
@ -322,8 +330,8 @@ void __init init_i8259_irqs (void)
init_8259A(0);
for (i = 0; i < 16; i++)
for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++)
set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq);
setup_irq(PIC_CASCADE_IR, &irq2);
setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
}
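
The i8259 hunks above rebase the legacy PIC at I8259A_IRQ_BASE instead of hard-coding Linux IRQs 0-15: each handler now translates the Linux IRQ number back into the controller-local 0-15 range before computing its mask bit, and init_i8259_irqs() and the cascade setup apply the same offset. A self-contained sketch of that translation (the I8259A_IRQ_BASE value is an assumption for illustration; it is platform-defined):

#include <stdio.h>

#define I8259A_IRQ_BASE 0               /* assumed; platforms may map the PIC elsewhere */

static unsigned int cached_irq_mask = 0xffff;   /* all lines masked at start */

/* Mirrors the reworked enable_8259A_irq(): translate first, then compute the bit. */
static void enable_8259A_irq_sketch(unsigned int irq)
{
        unsigned int mask;

        irq -= I8259A_IRQ_BASE;         /* controller-local 0..15 */
        mask = ~(1u << irq);
        cached_irq_mask &= mask;
        /* the real code then writes the low or high byte to the master or
           slave PIC data port under i8259A_lock, depending on irq & 8 */
}

int main(void)
{
        enable_8259A_irq_sketch(I8259A_IRQ_BASE + 3);
        printf("cached mask: %#x\n", cached_irq_mask);  /* 0xfff7 */
        return 0;
}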

View File

@ -10,6 +10,8 @@
* Copyright (C) 1996 - 2004 David S. Miller <dm@engr.sgi.com>
* Copyright (C) 2004 - 2005 Steven J. Hill <sjhill@realitydiluted.com>
*/
#undef DEBUG
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/stat.h>
@ -40,8 +42,6 @@
#include <linux/elf.h>
#undef DEBUG
static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs);
static int load_irix_library(struct file *);
static int irix_core_dump(long signr, struct pt_regs * regs,
@ -52,55 +52,86 @@ static struct linux_binfmt irix_format = {
irix_core_dump, PAGE_SIZE
};
#ifdef DEBUG
/* Debugging routines. */
static char *get_elf_p_type(Elf32_Word p_type)
{
int i = (int) p_type;
#ifdef DEBUG
switch (p_type) {
case PT_NULL:
return "PT_NULL";
break;
switch(i) {
case PT_NULL: return("PT_NULL"); break;
case PT_LOAD: return("PT_LOAD"); break;
case PT_DYNAMIC: return("PT_DYNAMIC"); break;
case PT_INTERP: return("PT_INTERP"); break;
case PT_NOTE: return("PT_NOTE"); break;
case PT_SHLIB: return("PT_SHLIB"); break;
case PT_PHDR: return("PT_PHDR"); break;
case PT_LOPROC: return("PT_LOPROC/REGINFO"); break;
case PT_HIPROC: return("PT_HIPROC"); break;
default: return("PT_BOGUS"); break;
case PT_LOAD:
return "PT_LOAD";
break;
case PT_DYNAMIC:
return "PT_DYNAMIC";
break;
case PT_INTERP:
return "PT_INTERP";
break;
case PT_NOTE:
return "PT_NOTE";
break;
case PT_SHLIB:
return "PT_SHLIB";
break;
case PT_PHDR:
return "PT_PHDR";
break;
case PT_LOPROC:
return "PT_LOPROC/REGINFO";
break;
case PT_HIPROC:
return "PT_HIPROC";
break;
default:
return "PT_BOGUS";
break;
}
#endif
}
static void print_elfhdr(struct elfhdr *ehp)
{
int i;
printk("ELFHDR: e_ident<");
for(i = 0; i < (EI_NIDENT - 1); i++) printk("%x ", ehp->e_ident[i]);
printk("%x>\n", ehp->e_ident[i]);
printk(" e_type[%04x] e_machine[%04x] e_version[%08lx]\n",
pr_debug("ELFHDR: e_ident<");
for (i = 0; i < (EI_NIDENT - 1); i++)
pr_debug("%x ", ehp->e_ident[i]);
pr_debug("%x>\n", ehp->e_ident[i]);
pr_debug(" e_type[%04x] e_machine[%04x] e_version[%08lx]\n",
(unsigned short) ehp->e_type, (unsigned short) ehp->e_machine,
(unsigned long) ehp->e_version);
printk(" e_entry[%08lx] e_phoff[%08lx] e_shoff[%08lx] "
pr_debug(" e_entry[%08lx] e_phoff[%08lx] e_shoff[%08lx] "
"e_flags[%08lx]\n",
(unsigned long) ehp->e_entry, (unsigned long) ehp->e_phoff,
(unsigned long) ehp->e_shoff, (unsigned long) ehp->e_flags);
printk(" e_ehsize[%04x] e_phentsize[%04x] e_phnum[%04x]\n",
(unsigned short) ehp->e_ehsize, (unsigned short) ehp->e_phentsize,
pr_debug(" e_ehsize[%04x] e_phentsize[%04x] e_phnum[%04x]\n",
(unsigned short) ehp->e_ehsize,
(unsigned short) ehp->e_phentsize,
(unsigned short) ehp->e_phnum);
printk(" e_shentsize[%04x] e_shnum[%04x] e_shstrndx[%04x]\n",
(unsigned short) ehp->e_shentsize, (unsigned short) ehp->e_shnum,
pr_debug(" e_shentsize[%04x] e_shnum[%04x] e_shstrndx[%04x]\n",
(unsigned short) ehp->e_shentsize,
(unsigned short) ehp->e_shnum,
(unsigned short) ehp->e_shstrndx);
}
static void print_phdr(int i, struct elf_phdr *ep)
{
printk("PHDR[%d]: p_type[%s] p_offset[%08lx] p_vaddr[%08lx] "
pr_debug("PHDR[%d]: p_type[%s] p_offset[%08lx] p_vaddr[%08lx] "
"p_paddr[%08lx]\n", i, get_elf_p_type(ep->p_type),
(unsigned long) ep->p_offset, (unsigned long) ep->p_vaddr,
(unsigned long) ep->p_paddr);
printk(" p_filesz[%08lx] p_memsz[%08lx] p_flags[%08lx] "
pr_debug(" p_filesz[%08lx] p_memsz[%08lx] p_flags[%08lx] "
"p_align[%08lx]\n", (unsigned long) ep->p_filesz,
(unsigned long) ep->p_memsz, (unsigned long) ep->p_flags,
(unsigned long) ep->p_align);
@ -110,14 +141,13 @@ static void dump_phdrs(struct elf_phdr *ep, int pnum)
{
int i;
for(i = 0; i < pnum; i++, ep++) {
if((ep->p_type == PT_LOAD) ||
for (i = 0; i < pnum; i++, ep++) {
if ((ep->p_type == PT_LOAD) ||
(ep->p_type == PT_INTERP) ||
(ep->p_type == PT_PHDR))
print_phdr(i, ep);
}
}
#endif /* DEBUG */
static void set_brk(unsigned long start, unsigned long end)
{
@ -156,11 +186,10 @@ static unsigned long * create_irix_tables(char * p, int argc, int envc,
elf_addr_t *envp;
elf_addr_t *sp, *csp;
#ifdef DEBUG
printk("create_irix_tables: p[%p] argc[%d] envc[%d] "
pr_debug("create_irix_tables: p[%p] argc[%d] envc[%d] "
"load_addr[%08x] interp_load_addr[%08x]\n",
p, argc, envc, load_addr, interp_load_addr);
#endif
sp = (elf_addr_t *) (~15UL & (unsigned long) p);
csp = sp;
csp -= exec ? DLINFO_ITEMS*2 : 2;
@ -181,7 +210,7 @@ static unsigned long * create_irix_tables(char * p, int argc, int envc,
sp -= 2;
NEW_AUX_ENT(0, AT_NULL, 0);
if(exec) {
if (exec) {
sp -= 11*2;
NEW_AUX_ENT (0, AT_PHDR, load_addr + exec->e_phoff);
@ -245,9 +274,7 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
last_bss = 0;
error = load_addr = 0;
#ifdef DEBUG
print_elfhdr(interp_elf_ex);
#endif
/* First of all, some simple consistency checks */
if ((interp_elf_ex->e_type != ET_EXEC &&
@ -258,7 +285,7 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
}
/* Now read in all of the header information */
if(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > PAGE_SIZE) {
if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > PAGE_SIZE) {
printk("IRIX interp header bigger than a page (%d)\n",
(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum));
return 0xffffffff;
@ -267,7 +294,7 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
elf_phdata = kmalloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum,
GFP_KERNEL);
if(!elf_phdata) {
if (!elf_phdata) {
printk("Cannot kmalloc phdata for IRIX interp.\n");
return 0xffffffff;
}
@ -275,7 +302,7 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
/* If the size of this structure has changed, then punt, since
* we will be doing the wrong thing.
*/
if(interp_elf_ex->e_phentsize != 32) {
if (interp_elf_ex->e_phentsize != 32) {
printk("IRIX interp e_phentsize == %d != 32 ",
interp_elf_ex->e_phentsize);
kfree(elf_phdata);
@ -286,27 +313,33 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
(char *) elf_phdata,
sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
#ifdef DEBUG
dump_phdrs(elf_phdata, interp_elf_ex->e_phnum);
#endif
eppnt = elf_phdata;
for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
if(eppnt->p_type == PT_LOAD) {
for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
if (eppnt->p_type == PT_LOAD) {
int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
int elf_prot = 0;
unsigned long vaddr = 0;
if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
if (eppnt->p_flags & PF_R)
elf_prot = PROT_READ;
if (eppnt->p_flags & PF_W)
elf_prot |= PROT_WRITE;
if (eppnt->p_flags & PF_X)
elf_prot |= PROT_EXEC;
elf_type |= MAP_FIXED;
vaddr = eppnt->p_vaddr;
pr_debug("INTERP do_mmap(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ",
pr_debug("INTERP do_mmap"
"(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ",
interpreter, vaddr,
(unsigned long) (eppnt->p_filesz + (eppnt->p_vaddr & 0xfff)),
(unsigned long) elf_prot, (unsigned long) elf_type,
(unsigned long) (eppnt->p_offset & 0xfffff000));
(unsigned long)
(eppnt->p_filesz + (eppnt->p_vaddr & 0xfff)),
(unsigned long)
elf_prot, (unsigned long) elf_type,
(unsigned long)
(eppnt->p_offset & 0xfffff000));
down_write(&current->mm->mmap_sem);
error = do_mmap(interpreter, vaddr,
eppnt->p_filesz + (eppnt->p_vaddr & 0xfff),
@ -314,33 +347,37 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
eppnt->p_offset & 0xfffff000);
up_write(&current->mm->mmap_sem);
if(error < 0 && error > -1024) {
printk("Aieee IRIX interp mmap error=%d\n", error);
if (error < 0 && error > -1024) {
printk("Aieee IRIX interp mmap error=%d\n",
error);
break; /* Real error */
}
pr_debug("error=%08lx ", (unsigned long) error);
if(!load_addr && interp_elf_ex->e_type == ET_DYN) {
if (!load_addr && interp_elf_ex->e_type == ET_DYN) {
load_addr = error;
pr_debug("load_addr = error ");
}
/* Find the end of the file mapping for this phdr, and keep
* track of the largest address we see for this.
/*
* Find the end of the file mapping for this phdr, and
* keep track of the largest address we see for this.
*/
k = eppnt->p_vaddr + eppnt->p_filesz;
if(k > elf_bss) elf_bss = k;
if (k > elf_bss)
elf_bss = k;
/* Do the same thing for the memory mapping - between
* elf_bss and last_bss is the bss section.
*/
k = eppnt->p_memsz + eppnt->p_vaddr;
if(k > last_bss) last_bss = k;
if (k > last_bss)
last_bss = k;
pr_debug("\n");
}
}
/* Now use mmap to map the library into memory. */
if(error < 0 && error > -1024) {
if (error < 0 && error > -1024) {
pr_debug("got error %d\n", error);
kfree(elf_phdata);
return 0xffffffff;
@ -377,7 +414,7 @@ static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm)
return -ENOEXEC;
/* First of all, some simple consistency checks */
if((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) ||
if ((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) ||
!bprm->file->f_op->mmap) {
return -ENOEXEC;
}
@ -388,7 +425,7 @@ static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm)
* XXX all registers as 64bits on cpu's capable of this at
* XXX exception time plus frob the XTLB exception vector.
*/
if((ehp->e_flags & EF_MIPS_ABI2))
if ((ehp->e_flags & EF_MIPS_ABI2))
return -ENOEXEC;
return 0;
@ -410,7 +447,7 @@ static inline int look_for_irix_interpreter(char **name,
struct file *file = NULL;
*name = NULL;
for(i = 0; i < pnum; i++, epp++) {
for (i = 0; i < pnum; i++, epp++) {
if (epp->p_type != PT_INTERP)
continue;
@ -467,8 +504,8 @@ static inline void map_executable(struct file *fp, struct elf_phdr *epp, int pnu
unsigned int tmp;
int i, prot;
for(i = 0; i < pnum; i++, epp++) {
if(epp->p_type != PT_LOAD)
for (i = 0; i < pnum; i++, epp++) {
if (epp->p_type != PT_LOAD)
continue;
/* Map it. */
@ -483,23 +520,23 @@ static inline void map_executable(struct file *fp, struct elf_phdr *epp, int pnu
up_write(&current->mm->mmap_sem);
/* Fixup location tracking vars. */
if((epp->p_vaddr & 0xfffff000) < *estack)
if ((epp->p_vaddr & 0xfffff000) < *estack)
*estack = (epp->p_vaddr & 0xfffff000);
if(!*laddr)
if (!*laddr)
*laddr = epp->p_vaddr - epp->p_offset;
if(epp->p_vaddr < *scode)
if (epp->p_vaddr < *scode)
*scode = epp->p_vaddr;
tmp = epp->p_vaddr + epp->p_filesz;
if(tmp > *ebss)
if (tmp > *ebss)
*ebss = tmp;
if((epp->p_flags & PF_X) && *ecode < tmp)
if ((epp->p_flags & PF_X) && *ecode < tmp)
*ecode = tmp;
if(*edata < tmp)
if (*edata < tmp)
*edata = tmp;
tmp = epp->p_vaddr + epp->p_memsz;
if(tmp > *ebrk)
if (tmp > *ebrk)
*ebrk = tmp;
}
@ -513,12 +550,12 @@ static inline int map_interpreter(struct elf_phdr *epp, struct elfhdr *ihp,
int i;
*eentry = 0xffffffff;
for(i = 0; i < pnum; i++, epp++) {
if(epp->p_type != PT_INTERP)
for (i = 0; i < pnum; i++, epp++) {
if (epp->p_type != PT_INTERP)
continue;
/* We should have fielded this error elsewhere... */
if(*eentry != 0xffffffff)
if (*eentry != 0xffffffff)
return -1;
set_fs(old_fs);
@ -604,9 +641,7 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
if (elf_ex.e_shnum > 20)
goto out;
#ifdef DEBUG
print_elfhdr(&elf_ex);
#endif
/* Now read in all of the header information */
size = elf_ex.e_phentsize * elf_ex.e_phnum;
@ -622,13 +657,11 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
if (retval < 0)
goto out_free_ph;
#ifdef DEBUG
dump_phdrs(elf_phdata, elf_ex.e_phnum);
#endif
/* Set some things for later. */
for(i = 0; i < elf_ex.e_phnum; i++) {
switch(elf_phdata[i].p_type) {
for (i = 0; i < elf_ex.e_phnum; i++) {
switch (elf_phdata[i].p_type) {
case PT_INTERP:
has_interp = 1;
elf_ihdr = &elf_phdata[i];
@ -667,7 +700,7 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
if (elf_interpreter) {
retval = verify_irix_interpreter(&interp_elf_ex);
if(retval)
if (retval)
goto out_free_interp;
}
@ -706,12 +739,12 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
&load_addr, &start_code, &elf_bss, &end_code,
&end_data, &elf_brk);
if(elf_interpreter) {
if (elf_interpreter) {
retval = map_interpreter(elf_phdata, &interp_elf_ex,
interpreter, &interp_load_addr,
elf_ex.e_phnum, old_fs, &elf_entry);
kfree(elf_interpreter);
if(retval) {
if (retval) {
set_fs(old_fs);
printk("Unable to load IRIX ELF interpreter\n");
send_sig(SIGSEGV, current, 0);
@ -809,12 +842,12 @@ static int load_irix_library(struct file *file)
return -ENOEXEC;
/* First of all, some simple consistency checks. */
if(elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
!file->f_op->mmap)
return -ENOEXEC;
/* Now read in all of the header information. */
if(sizeof(struct elf_phdr) * elf_ex.e_phnum > PAGE_SIZE)
if (sizeof(struct elf_phdr) * elf_ex.e_phnum > PAGE_SIZE)
return -ENOEXEC;
elf_phdata = kmalloc(sizeof(struct elf_phdr) * elf_ex.e_phnum, GFP_KERNEL);
@ -825,15 +858,15 @@ static int load_irix_library(struct file *file)
sizeof(struct elf_phdr) * elf_ex.e_phnum);
j = 0;
for(i=0; i<elf_ex.e_phnum; i++)
if((elf_phdata + i)->p_type == PT_LOAD) j++;
for (i=0; i<elf_ex.e_phnum; i++)
if ((elf_phdata + i)->p_type == PT_LOAD) j++;
if(j != 1) {
if (j != 1) {
kfree(elf_phdata);
return -ENOEXEC;
}
while(elf_phdata->p_type != PT_LOAD) elf_phdata++;
while (elf_phdata->p_type != PT_LOAD) elf_phdata++;
/* Now use mmap to map the library into memory. */
down_write(&current->mm->mmap_sem);
@ -889,9 +922,7 @@ unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt)
return -EFAULT;
}
#ifdef DEBUG
dump_phdrs(user_phdrp, cnt);
#endif
for (i = 0; i < cnt; i++, hp++) {
if (__get_user(type, &hp->p_type))
@ -905,14 +936,14 @@ unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt)
filp = fget(fd);
if (!filp)
return -EACCES;
if(!filp->f_op) {
if (!filp->f_op) {
printk("irix_mapelf: Bogon filp!\n");
fput(filp);
return -EACCES;
}
hp = user_phdrp;
for(i = 0; i < cnt; i++, hp++) {
for (i = 0; i < cnt; i++, hp++) {
int prot;
retval = __get_user(vaddr, &hp->p_vaddr);
@ -1015,8 +1046,6 @@ static int notesize(struct memelfnote *en)
return sz;
}
/* #define DEBUG */
#define DUMP_WRITE(addr, nr) \
if (!dump_write(file, (addr), (nr))) \
goto end_coredump;
@ -1093,9 +1122,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
segs++;
}
#ifdef DEBUG
printk("irix_core_dump: %d segs taking %d bytes\n", segs, size);
#endif
pr_debug("irix_core_dump: %d segs taking %d bytes\n", segs, size);
/* Set up header. */
memcpy(elf.e_ident, ELFMAG, SELFMAG);
@ -1221,7 +1248,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
struct elf_phdr phdr;
int sz = 0;
for(i = 0; i < numnote; i++)
for (i = 0; i < numnote; i++)
sz += notesize(&notes[i]);
phdr.p_type = PT_NOTE;
@ -1241,7 +1268,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
dataoff = offset = roundup(offset, PAGE_SIZE);
/* Write program headers for segments dump. */
for(vma = current->mm->mmap, i = 0;
for (vma = current->mm->mmap, i = 0;
i < segs && vma != NULL; vma = vma->vm_next) {
struct elf_phdr phdr;
size_t sz;
@ -1267,7 +1294,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
DUMP_WRITE(&phdr, sizeof(phdr));
}
for(i = 0; i < numnote; i++)
for (i = 0; i < numnote; i++)
if (!writenote(&notes[i], file))
goto end_coredump;
@ -1275,7 +1302,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
DUMP_SEEK(dataoff);
for(i = 0, vma = current->mm->mmap;
for (i = 0, vma = current->mm->mmap;
i < segs && vma != NULL;
vma = vma->vm_next) {
unsigned long addr = vma->vm_start;
@ -1284,9 +1311,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
if (!maydump(vma))
continue;
i++;
#ifdef DEBUG
printk("elf_core_dump: writing %08lx %lx\n", addr, len);
#endif
pr_debug("elf_core_dump: writing %08lx %lx\n", addr, len);
DUMP_WRITE((void __user *)addr, len);
}
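
The binfmt_irix hunks above drop the hand-rolled "#ifdef DEBUG ... printk(...)" blocks in favour of pr_debug(), which expands to nothing unless DEBUG is defined before the kernel headers are pulled in (which is why the "#undef DEBUG" moves to the top of the file). A stand-in definition that shows the idiom outside the kernel:

#include <stdio.h>

/* Stand-in for the kernel's pr_debug(): only emits output when DEBUG is set. */
#ifdef DEBUG
#define pr_debug(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) do { } while (0)
#endif

int main(void)
{
        pr_debug("elf_core_dump: writing %08lx %lx\n", 0x400000UL, 0x1000UL);
        return 0;       /* silent unless compiled with -DDEBUG */
}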

View File

@ -112,7 +112,7 @@ msc_bind_eic_interrupt (unsigned int irq, unsigned int set)
}
struct irq_chip msc_levelirq_type = {
.typename = "SOC-it-Level",
.name = "SOC-it-Level",
.ack = level_mask_and_ack_msc_irq,
.mask = mask_msc_irq,
.mask_ack = level_mask_and_ack_msc_irq,
@ -122,7 +122,7 @@ struct irq_chip msc_levelirq_type = {
};
struct irq_chip msc_edgeirq_type = {
.typename = "SOC-it-Edge",
.name = "SOC-it-Edge",
.ack = edge_mask_and_ack_msc_irq,
.mask = mask_msc_irq,
.mask_ack = edge_mask_and_ack_msc_irq,

View File

@ -92,7 +92,7 @@ void ll_mv64340_irq(void)
}
struct irq_chip mv64340_irq_type = {
.typename = "MV-64340",
.name = "MV-64340",
.ack = mask_mv64340_irq,
.mask = mask_mv64340_irq,
.mask_ack = mask_mv64340_irq,

View File

@ -17,28 +17,27 @@
#include <asm/mipsregs.h>
#include <asm/system.h>
static int irq_base;
static inline void unmask_rm7k_irq(unsigned int irq)
{
set_c0_intcontrol(0x100 << (irq - irq_base));
set_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE));
}
static inline void mask_rm7k_irq(unsigned int irq)
{
clear_c0_intcontrol(0x100 << (irq - irq_base));
clear_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE));
}
static struct irq_chip rm7k_irq_controller = {
.typename = "RM7000",
.name = "RM7000",
.ack = mask_rm7k_irq,
.mask = mask_rm7k_irq,
.mask_ack = mask_rm7k_irq,
.unmask = unmask_rm7k_irq,
};
void __init rm7k_cpu_irq_init(int base)
void __init rm7k_cpu_irq_init(void)
{
int base = RM7K_CPU_IRQ_BASE;
int i;
clear_c0_intcontrol(0x00000f00); /* Mask all */
@ -46,6 +45,4 @@ void __init rm7k_cpu_irq_init(int base)
for (i = base; i < base + 4; i++)
set_irq_chip_and_handler(i, &rm7k_irq_controller,
handle_level_irq);
irq_base = base;
}

View File

@ -18,16 +18,14 @@
#include <asm/mipsregs.h>
#include <asm/system.h>
static int irq_base;
static inline void unmask_rm9k_irq(unsigned int irq)
{
set_c0_intcontrol(0x1000 << (irq - irq_base));
set_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE));
}
static inline void mask_rm9k_irq(unsigned int irq)
{
clear_c0_intcontrol(0x1000 << (irq - irq_base));
clear_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE));
}
static inline void rm9k_cpu_irq_enable(unsigned int irq)
@ -39,15 +37,6 @@ static inline void rm9k_cpu_irq_enable(unsigned int irq)
local_irq_restore(flags);
}
static void rm9k_cpu_irq_disable(unsigned int irq)
{
unsigned long flags;
local_irq_save(flags);
mask_rm9k_irq(irq);
local_irq_restore(flags);
}
/*
* Performance counter interrupts are global on all processors.
*/
@ -81,7 +70,7 @@ static void rm9k_perfcounter_irq_shutdown(unsigned int irq)
}
static struct irq_chip rm9k_irq_controller = {
.typename = "RM9000",
.name = "RM9000",
.ack = mask_rm9k_irq,
.mask = mask_rm9k_irq,
.mask_ack = mask_rm9k_irq,
@ -89,7 +78,7 @@ static struct irq_chip rm9k_irq_controller = {
};
static struct irq_chip rm9k_perfcounter_irq = {
.typename = "RM9000",
.name = "RM9000",
.startup = rm9k_perfcounter_irq_startup,
.shutdown = rm9k_perfcounter_irq_shutdown,
.ack = mask_rm9k_irq,
@ -102,8 +91,9 @@ unsigned int rm9000_perfcount_irq;
EXPORT_SYMBOL(rm9000_perfcount_irq);
void __init rm9k_cpu_irq_init(int base)
void __init rm9k_cpu_irq_init(void)
{
int base = RM9K_CPU_IRQ_BASE;
int i;
clear_c0_intcontrol(0x0000f000); /* Mask all */
@ -115,6 +105,4 @@ void __init rm9k_cpu_irq_init(int base)
rm9000_perfcount_irq = base + 1;
set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq,
handle_level_irq);
irq_base = base;
}

View File

@ -25,7 +25,7 @@
* Don't even think about using this on SMP. You have been warned.
*
* This file exports one global function:
* void mips_cpu_irq_init(int irq_base);
* void mips_cpu_irq_init(void);
*/
#include <linux/init.h>
#include <linux/interrupt.h>
@ -36,22 +36,20 @@
#include <asm/mipsmtregs.h>
#include <asm/system.h>
static int mips_cpu_irq_base;
static inline void unmask_mips_irq(unsigned int irq)
{
set_c0_status(0x100 << (irq - mips_cpu_irq_base));
set_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE));
irq_enable_hazard();
}
static inline void mask_mips_irq(unsigned int irq)
{
clear_c0_status(0x100 << (irq - mips_cpu_irq_base));
clear_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE));
irq_disable_hazard();
}
static struct irq_chip mips_cpu_irq_controller = {
.typename = "MIPS",
.name = "MIPS",
.ack = mask_mips_irq,
.mask = mask_mips_irq,
.mask_ack = mask_mips_irq,
@ -70,7 +68,7 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
{
unsigned int vpflags = dvpe();
clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));
clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE));
evpe(vpflags);
unmask_mips_mt_irq(irq);
@ -84,13 +82,13 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
static void mips_mt_cpu_irq_ack(unsigned int irq)
{
unsigned int vpflags = dvpe();
clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));
clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE));
evpe(vpflags);
mask_mips_mt_irq(irq);
}
static struct irq_chip mips_mt_cpu_irq_controller = {
.typename = "MIPS",
.name = "MIPS",
.startup = mips_mt_cpu_irq_startup,
.ack = mips_mt_cpu_irq_ack,
.mask = mask_mips_mt_irq,
@ -99,8 +97,9 @@ static struct irq_chip mips_mt_cpu_irq_controller = {
.eoi = unmask_mips_mt_irq,
};
void __init mips_cpu_irq_init(int irq_base)
void __init mips_cpu_irq_init(void)
{
int irq_base = MIPS_CPU_IRQ_BASE;
int i;
/* Mask interrupts. */
@ -118,6 +117,4 @@ void __init mips_cpu_irq_init(int irq_base)
for (i = irq_base + 2; i < irq_base + 8; i++)
set_irq_chip_and_handler(i, &mips_cpu_irq_controller,
handle_level_irq);
mips_cpu_irq_base = irq_base;
}
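
A recurring change in this diff: the CPU interrupt-controller init routines (mips_cpu_irq_init, rm7k_cpu_irq_init, rm9k_cpu_irq_init) no longer take a base argument that was stashed in a static variable; they use the fixed per-platform constants MIPS_CPU_IRQ_BASE, RM7K_CPU_IRQ_BASE and RM9K_CPU_IRQ_BASE, and callers such as the rtlx, SMP and SMTC dispatch code switch from MIPSCPU_INT_BASE to MIPS_CPU_IRQ_BASE. A sketch of the shape of that change (the constant's value is platform-defined; 0 is only an assumption here):

/* Before: base chosen at run time and remembered in a static. */
static int mips_cpu_irq_base;

static inline void mask_mips_irq_old(unsigned int irq)
{
        clear_c0_status(0x100 << (irq - mips_cpu_irq_base));
}

/* After: the base is a compile-time constant supplied by the platform. */
#define MIPS_CPU_IRQ_BASE 0             /* assumption for illustration */

static inline void mask_mips_irq_new(unsigned int irq)
{
        clear_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE));
}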

View File

@ -96,6 +96,10 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
goto out_unlock;
}
retval = security_task_setscheduler(p, 0, NULL);
if (retval)
goto out_unlock;
/* Record new user-specified CPU set for future reference */
p->thread.user_cpus_allowed = new_mask;
@ -141,8 +145,9 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
p = find_process_by_pid(pid);
if (!p)
goto out_unlock;
retval = 0;
retval = security_task_getscheduler(p);
if (retval)
goto out_unlock;
cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
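
The hunk above adds the previously missing error checks after the LSM hooks in the MIPS MT affinity syscalls: if security_task_setscheduler() or security_task_getscheduler() refuses the operation, the function must bail out through its cleanup label before touching the task. A skeleton of that pattern (kernel types assumed; the cleanup under out_unlock is only a placeholder, the real function's label does its own unlocking):

/* Sketch of the setter path, names as in the diff. */
static long setaffinity_sketch(struct task_struct *p, cpumask_t new_mask)
{
        long retval;

        retval = security_task_setscheduler(p, 0, NULL);
        if (retval)
                goto out_unlock;        /* LSM said no: leave the task untouched */

        p->thread.user_cpus_allowed = new_mask;
        retval = set_cpus_allowed(p, new_mask);

out_unlock:
        /* unlock/put whatever was acquired earlier (details differ in the
           real mipsmt_sys_sched_setaffinity) */
        return retval;
}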

View File

@ -41,10 +41,6 @@
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
extern void smtc_idle_loop_hook(void);
#endif /* CONFIG_MIPS_MT_SMTC */
/*
* The idle thread. There's no useful work to be done, so just try to conserve
@ -57,6 +53,8 @@ ATTRIB_NORET void cpu_idle(void)
while (1) {
while (!need_resched()) {
#ifdef CONFIG_MIPS_MT_SMTC
extern void smtc_idle_loop_hook(void);
smtc_idle_loop_hook();
#endif /* CONFIG_MIPS_MT_SMTC */
if (cpu_wait)

View File

@ -114,6 +114,14 @@ LEAF(_save_fp_context32)
*/
LEAF(_restore_fp_context)
EX lw t0, SC_FPC_CSR(a0)
/* Fail if the CSR has exceptions pending */
srl t1, t0, 5
and t1, t0
andi t1, 0x1f << 7
bnez t1, fault
nop
#ifdef CONFIG_64BIT
EX ldc1 $f1, SC_FPREGS+8(a0)
EX ldc1 $f3, SC_FPREGS+24(a0)
@ -157,6 +165,14 @@ LEAF(_restore_fp_context)
LEAF(_restore_fp_context32)
/* Restore an o32 sigcontext. */
EX lw t0, SC32_FPC_CSR(a0)
/* Fail if the CSR has exceptions pending */
srl t1, t0, 5
and t1, t0
andi t1, 0x1f << 7
bnez t1, fault
nop
EX ldc1 $f0, SC32_FPREGS+0(a0)
EX ldc1 $f2, SC32_FPREGS+16(a0)
EX ldc1 $f4, SC32_FPREGS+32(a0)
@ -177,9 +193,10 @@ LEAF(_restore_fp_context32)
jr ra
li v0, 0 # success
END(_restore_fp_context32)
.set reorder
#endif
.set reorder
.type fault@function
.ent fault
fault: li v0, -EFAULT # failure
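
The assembly added above makes _restore_fp_context fail with -EFAULT if the saved FPU control/status word has an exception that is both pending (cause bits 12-16) and enabled (enable bits 7-11), since restoring such a value would immediately re-raise the exception. Shifting the word right by 5 lines the cause field up with the enable field, so ANDing the two and masking bits 7-11 is nonzero exactly when an enabled exception is pending. The same test in self-contained C:

#include <stdio.h>

/* Returns nonzero if restoring this FCSR value would trap immediately:
 * cause bits (12-16) shifted down by 5 land on the enable bits (7-11). */
static int fcsr_would_trap(unsigned int fcsr)
{
        unsigned int t = (fcsr >> 5) & fcsr;

        return t & (0x1f << 7);
}

int main(void)
{
        /* invalid-operation cause (bit 16) with its enable (bit 11) set */
        printf("%d\n", fcsr_would_trap((1u << 16) | (1u << 11)) != 0);  /* 1 */
        /* cause set but not enabled: harmless to restore */
        printf("%d\n", fcsr_would_trap(1u << 16) != 0);                 /* 0 */
        return 0;
}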

View File

@ -63,7 +63,7 @@ extern void *vpe_get_shared(int index);
static void rtlx_dispatch(void)
{
do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ);
do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ);
}
@ -491,7 +491,7 @@ static struct irqaction rtlx_irq = {
.name = "RTLX",
};
static int rtlx_irq_num = MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ;
static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ;
static char register_chrdev_failed[] __initdata =
KERN_ERR "rtlx_module_init: unable to register device\n";

View File

@ -384,7 +384,7 @@ EXPORT(sysn32_call_table)
PTR sys_readlinkat
PTR sys_fchmodat
PTR sys_faccessat
PTR sys_pselect6
PTR compat_sys_pselect6
PTR sys_ppoll /* 6265 */
PTR sys_unshare
PTR sys_splice

View File

@ -506,7 +506,7 @@ sys_call_table:
PTR sys_readlinkat
PTR sys_fchmodat
PTR sys_faccessat /* 4300 */
PTR sys_pselect6
PTR compat_sys_pselect6
PTR sys_ppoll
PTR sys_unshare
PTR sys_splice
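
The two syscall-table hunks above route the n32 and o32 pselect6 entries to compat_sys_pselect6 instead of sys_pselect6: 32-bit user space passes 32-bit timespec, sigset and pointer layouts, so the native handler would misread them and a compat wrapper has to convert first. A toy illustration of the kind of widening such a wrapper performs (the _sketch names are hypothetical; the real wrapper also copies the data from user space with access checks):

#include <time.h>

/* Illustrative only: a 32-bit userland timespec has 32-bit fields. */
struct compat_timespec_sketch {
        int tv_sec;
        int tv_nsec;
};

static void widen_timespec_sketch(struct timespec *ts,
                                  const struct compat_timespec_sketch *cts)
{
        ts->tv_sec  = cts->tv_sec;
        ts->tv_nsec = cts->tv_nsec;
}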

View File

@ -271,8 +271,7 @@ static void __init bootmem_init(void)
static void __init bootmem_init(void)
{
unsigned long reserved_end;
unsigned long highest = 0;
unsigned long mapstart = -1UL;
unsigned long mapstart = ~0UL;
unsigned long bootmap_size;
int i;
@ -283,6 +282,13 @@ static void __init bootmem_init(void)
*/
reserved_end = max(init_initrd(), PFN_UP(__pa_symbol(&_end)));
/*
* max_low_pfn is not a number of pages. The number of pages
* of the system is given by 'max_low_pfn - min_low_pfn'.
*/
min_low_pfn = ~0UL;
max_low_pfn = 0;
/*
* Find the highest page frame number we have available.
*/
@ -296,8 +302,10 @@ static void __init bootmem_init(void)
end = PFN_DOWN(boot_mem_map.map[i].addr
+ boot_mem_map.map[i].size);
if (end > highest)
highest = end;
if (end > max_low_pfn)
max_low_pfn = end;
if (start < min_low_pfn)
min_low_pfn = start;
if (end <= reserved_end)
continue;
if (start >= mapstart)
@ -305,22 +313,36 @@ static void __init bootmem_init(void)
mapstart = max(reserved_end, start);
}
if (min_low_pfn >= max_low_pfn)
panic("Incorrect memory mapping !!!");
if (min_low_pfn > ARCH_PFN_OFFSET) {
printk(KERN_INFO
"Wasting %lu bytes for tracking %lu unused pages\n",
(min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
min_low_pfn - ARCH_PFN_OFFSET);
} else if (min_low_pfn < ARCH_PFN_OFFSET) {
printk(KERN_INFO
"%lu free pages won't be used\n",
ARCH_PFN_OFFSET - min_low_pfn);
}
min_low_pfn = ARCH_PFN_OFFSET;
/*
* Determine low and high memory ranges
*/
if (highest > PFN_DOWN(HIGHMEM_START)) {
if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
highstart_pfn = PFN_DOWN(HIGHMEM_START);
highend_pfn = highest;
highend_pfn = max_low_pfn;
#endif
highest = PFN_DOWN(HIGHMEM_START);
max_low_pfn = PFN_DOWN(HIGHMEM_START);
}
/*
* Initialize the boot-time allocator with low memory only.
*/
bootmap_size = init_bootmem(mapstart, highest);
bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
min_low_pfn, max_low_pfn);
/*
* Register fully available low RAM pages with the bootmem allocator.
*/
@ -541,3 +563,6 @@ int __init dsp_disable(char *s)
}
__setup("nodsp", dsp_disable);
unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
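
The setup.c hunk above reworks bootmem_init() to track both min_low_pfn and max_low_pfn across the boot memory map instead of a single "highest" frame number, panic on an empty range, clamp min_low_pfn to ARCH_PFN_OFFSET, and pass both bounds to init_bootmem_node() so the bootmem bitmap only spans pages that actually exist; the kernelsp and fw_arg* storage that an earlier hunk removed from head.S reappears here as ordinary C definitions. A self-contained sketch of the min/max scan (the region values are made up):

#include <stdio.h>

struct region { unsigned long start_pfn, end_pfn; };

int main(void)
{
        /* hypothetical two-bank memory layout */
        struct region map[] = { { 0x00000, 0x04000 }, { 0x10000, 0x18000 } };
        unsigned long min_low_pfn = ~0UL, max_low_pfn = 0;
        unsigned int i;

        for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
                if (map[i].start_pfn < min_low_pfn)
                        min_low_pfn = map[i].start_pfn;
                if (map[i].end_pfn > max_low_pfn)
                        max_low_pfn = map[i].end_pfn;
        }
        /* the kernel then panics if min >= max and clamps min to ARCH_PFN_OFFSET */
        printf("pfn range: %#lx - %#lx (%lu pages spanned)\n",
               min_low_pfn, max_low_pfn, max_low_pfn - min_low_pfn);
        return 0;
}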

View File

@ -35,7 +35,6 @@
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
#include <asm/mips-boards/maltaint.h> /* This is f*cking wrong */
#define MIPS_CPU_IPI_RESCHED_IRQ 0
#define MIPS_CPU_IPI_CALL_IRQ 1
@ -108,12 +107,12 @@ void __init sanitize_tlb_entries(void)
static void ipi_resched_dispatch(void)
{
do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
}
static void ipi_call_dispatch(void)
{
do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ);
do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
}
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
@ -270,8 +269,8 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
}
cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ;
cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
setup_irq(cpu_ipi_resched_irq, &irq_resched);
setup_irq(cpu_ipi_call_irq, &irq_call);

View File

@ -26,16 +26,6 @@
* This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
*/
/*
* MIPSCPU_INT_BASE is identically defined in both
* asm-mips/mips-boards/maltaint.h and asm-mips/mips-boards/simint.h,
* but as yet there's no properly organized include structure that
* will ensure that the right *int.h file will be included for a
* given platform build.
*/
#define MIPSCPU_INT_BASE 16
#define MIPS_CPU_IPI_IRQ 1
#define LOCK_MT_PRA() \
@ -77,15 +67,15 @@ unsigned int ipi_timer_latch[NR_CPUS];
#define IPIBUF_PER_CPU 4
struct smtc_ipi_q IPIQ[NR_CPUS];
struct smtc_ipi_q freeIPIq;
static struct smtc_ipi_q IPIQ[NR_CPUS];
static struct smtc_ipi_q freeIPIq;
/* Forward declarations */
void ipi_decode(struct smtc_ipi *);
void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
void setup_cross_vpe_interrupts(void);
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
static void setup_cross_vpe_interrupts(void);
void init_smtc_stats(void);
/* Global SMTC Status */
@ -200,7 +190,7 @@ void __init sanitize_tlb_entries(void)
* Configure shared TLB - VPC configuration bit must be set by caller
*/
void smtc_configure_tlb(void)
static void smtc_configure_tlb(void)
{
int i,tlbsiz,vpes;
unsigned long mvpconf0;
@ -648,7 +638,7 @@ int setup_irq_smtc(unsigned int irq, struct irqaction * new,
* the VPE.
*/
void smtc_ipi_qdump(void)
static void smtc_ipi_qdump(void)
{
int i;
@ -686,28 +676,6 @@ static __inline__ int atomic_postincrement(unsigned int *pv)
return result;
}
/* No longer used in IPI dispatch, but retained for future recycling */
static __inline__ int atomic_postclear(unsigned int *pv)
{
unsigned long result;
unsigned long temp;
__asm__ __volatile__(
"1: ll %0, %2 \n"
" or %1, $0, $0 \n"
" sc %1, %2 \n"
" beqz %1, 1b \n"
" sync \n"
: "=&r" (result), "=&r" (temp), "=m" (*pv)
: "m" (*pv)
: "memory");
return result;
}
void smtc_send_ipi(int cpu, int type, unsigned int action)
{
int tcstatus;
@ -781,7 +749,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
/*
* Send IPI message to Halted TC, TargTC/TargVPE already having been set
*/
void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
{
struct pt_regs *kstack;
unsigned long tcstatus;
@ -921,7 +889,7 @@ void smtc_timer_broadcast(int vpe)
* interrupts.
*/
static int cpu_ipi_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_IRQ;
static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;
static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
{
@ -1000,7 +968,7 @@ static void ipi_irq_dispatch(void)
static struct irqaction irq_ipi;
void setup_cross_vpe_interrupts(void)
static void setup_cross_vpe_interrupts(void)
{
if (!cpu_has_vint)
panic("SMTC Kernel requires Vectored Interupt support");
@ -1296,7 +1264,7 @@ void smtc_flush_tlb_asid(unsigned long asid)
* Support for single-threading cache flush operations.
*/
int halt_state_save[NR_CPUS];
static int halt_state_save[NR_CPUS];
/*
* To really, really be sure that nothing is being done

View File

@ -695,7 +695,7 @@ static void dump_tclist(void)
}
/* We are prepared so configure and start the VPE... */
int vpe_run(struct vpe * v)
static int vpe_run(struct vpe * v)
{
struct vpe_notifications *n;
unsigned long val, dmt_flag;
@ -832,7 +832,7 @@ static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
* contents of the program (p)buffer performing relocatations/etc, free's it
* when finished.
*/
int vpe_elfload(struct vpe * v)
static int vpe_elfload(struct vpe * v)
{
Elf_Ehdr *hdr;
Elf_Shdr *sechdrs;

View File

@ -45,7 +45,7 @@ void enable_lasat_irq(unsigned int irq_nr)
}
static struct irq_chip lasat_irq_type = {
.typename = "Lasat",
.name = "Lasat",
.ack = disable_lasat_irq,
.mask = disable_lasat_irq,
.mask_ack = disable_lasat_irq,

View File

@ -132,9 +132,8 @@ void __init prom_init(void)
add_memory_region(0, lasat_board_info.li_memsize, BOOT_MEM_RAM);
}
unsigned long __init prom_free_prom_memory(void)
void __init prom_free_prom_memory(void)
{
return 0;
}
const char *get_system_type(void)

View File

@ -2,7 +2,7 @@
# Makefile for MIPS-specific library files..
#
lib-y += memset.o watch.o
lib-y += watch.o
obj-$(CONFIG_CPU_MIPS32) += dump_tlb.o
obj-$(CONFIG_CPU_MIPS64) += dump_tlb.o

View File

@ -2,7 +2,7 @@
# Makefile for MIPS-specific library files..
#
lib-y += memset.o watch.o
lib-y += watch.o
obj-$(CONFIG_CPU_MIPS32) += dump_tlb.o
obj-$(CONFIG_CPU_MIPS64) += dump_tlb.o

View File

@ -1,142 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998, 1999, 2000 by Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
#define EX(insn,reg,addr,handler) \
9: insn reg, addr; \
.section __ex_table,"a"; \
PTR 9b, handler; \
.previous
.macro f_fill64 dst, offset, val, fixup
EX(LONG_S, \val, (\offset + 0 * LONGSIZE)(\dst), \fixup)
EX(LONG_S, \val, (\offset + 1 * LONGSIZE)(\dst), \fixup)
EX(LONG_S, \val, (\offset + 2 * LONGSIZE)(\dst), \fixup)
EX(LONG_S, \val, (\offset + 3 * LONGSIZE)(\dst), \fixup)
EX(LONG_S, \val, (\offset + 4 * LONGSIZE)(\dst), \fixup)
EX(LONG_S, \val, (\offset + 5 * LONGSIZE)(\dst), \fixup)
EX(LONG_S, \val, (\offset + 6 * LONGSIZE)(\dst), \fixup)
EX(LONG_S, \val, (\offset + 7 * LONGSIZE)(\dst), \fixup)
.endm
/*
* memset(void *s, int c, size_t n)
*
* a0: start of area to clear
* a1: char to fill with
* a2: size of area to clear
*/
.set noreorder
.align 5
LEAF(memset)
beqz a1, 1f
move v0, a0 /* result */
andi a1, 0xff /* spread fillword */
dsll t1, a1, 8
or a1, t1
dsll t1, a1, 16
or a1, t1
dsll t1, a1, 32
or a1, t1
1:
FEXPORT(__bzero)
sltiu t0, a2, LONGSIZE /* very small region? */
bnez t0, small_memset
andi t0, a0, LONGMASK /* aligned? */
beqz t0, 1f
PTR_SUBU t0, LONGSIZE /* alignment in bytes */
#ifdef __MIPSEB__
EX(sdl, a1, (a0), first_fixup) /* make dword aligned */
#endif
#ifdef __MIPSEL__
EX(sdr, a1, (a0), first_fixup) /* make dword aligned */
#endif
PTR_SUBU a0, t0 /* long align ptr */
PTR_ADDU a2, t0 /* correct size */
1: ori t1, a2, 0x3f /* # of full blocks */
xori t1, 0x3f
beqz t1, memset_partial /* no block to fill */
andi t0, a2, 0x38
PTR_ADDU t1, a0 /* end address */
.set reorder
1: PTR_ADDIU a0, 64
f_fill64 a0, -64, a1, fwd_fixup
bne t1, a0, 1b
.set noreorder
memset_partial:
PTR_LA t1, 2f /* where to start */
.set noat
dsrl AT, t0, 1
PTR_SUBU t1, AT
.set noat
jr t1
PTR_ADDU a0, t0 /* dest ptr */
.set push
.set noreorder
.set nomacro
f_fill64 a0, -64, a1, partial_fixup /* ... but first do longs ... */
2: .set pop
andi a2, LONGMASK /* At most one long to go */
beqz a2, 1f
PTR_ADDU a0, a2 /* What's left */
#ifdef __MIPSEB__
EX(sdr, a1, -1(a0), last_fixup)
#endif
#ifdef __MIPSEL__
EX(sdl, a1, -1(a0), last_fixup)
#endif
1: jr ra
move a2, zero
small_memset:
beqz a2, 2f
PTR_ADDU t1, a0, a2
1: PTR_ADDIU a0, 1 /* fill bytewise */
bne t1, a0, 1b
sb a1, -1(a0)
2: jr ra /* done */
move a2, zero
END(memset)
first_fixup:
jr ra
nop
fwd_fixup:
PTR_L t0, TI_TASK($28)
LONG_L t0, THREAD_BUADDR(t0)
andi a2, 0x3f
LONG_ADDU a2, t1
jr ra
LONG_SUBU a2, t0
partial_fixup:
PTR_L t0, TI_TASK($28)
LONG_L t0, THREAD_BUADDR(t0)
andi a2, LONGMASK
LONG_ADDU a2, t1
jr ra
LONG_SUBU a2, t0
last_fixup:
jr ra
andi v1, a2, LONGMASK
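
The deleted 64-bit memset.S above (together with the two Makefile hunks dropping memset.o) is built around the usual MIPS fault-fixup idiom: every store that may touch a bad user pointer is wrapped in the EX() macro, which records a (faulting instruction, fixup label) pair in the __ex_table section so the page-fault handler can resume at the fixup instead of oopsing. A self-contained sketch of the lookup side of that mechanism (the real kernel keeps the table sorted and binary-searches it):

#include <stddef.h>
#include <stdio.h>

/* Same two fields as the kernel's struct exception_table_entry. */
struct exception_table_entry {
        unsigned long insn;     /* address of the instruction that may fault */
        unsigned long fixup;    /* address to resume execution at */
};

static const struct exception_table_entry *
search_extable_sketch(const struct exception_table_entry *tbl, size_t n,
                      unsigned long faulting_pc)
{
        size_t i;

        for (i = 0; i < n; i++)         /* linear scan; the kernel bisects */
                if (tbl[i].insn == faulting_pc)
                        return &tbl[i];
        return NULL;                    /* no fixup registered: genuine fault */
}

int main(void)
{
        struct exception_table_entry tbl[] = {
                { 0x80100000UL, 0x80105000UL },  /* made-up addresses */
        };
        const struct exception_table_entry *e =
                search_extable_sketch(tbl, 1, 0x80100000UL);

        printf("fixup at %#lx\n", e ? e->fixup : 0UL);
        return 0;
}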
