Merge branch 'linus' into sched/core

Merge reason: We are queueing up a dependent patch.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
Ingo Molnar 2011-09-18 14:01:26 +02:00
commit bfa322c48d
893 changed files with 12531 additions and 4642 deletions


@ -272,6 +272,8 @@ printk-formats.txt
- how to get printk format specifiers right
prio_tree.txt
- info on radix-priority-search-tree use for indexing vmas.
ramoops.txt
- documentation of the ramoops oops/panic logging module.
rbtree.txt
- info on what red-black trees are and what they are for.
robust-futex-ABI.txt


@ -1455,7 +1455,7 @@ Applicable to the H264 encoder.</entry>
</row>
<row><entry></entry></row>
<row>
<row id="v4l2-mpeg-video-h264-vui-sar-idc">
<entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC</constant>&nbsp;</entry>
<entry>enum&nbsp;v4l2_mpeg_video_h264_vui_sar_idc</entry>
</row>
@ -1561,7 +1561,7 @@ Applicable to the H264 encoder.</entry>
</row>
<row><entry></entry></row>
<row>
<row id="v4l2-mpeg-video-h264-level">
<entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_LEVEL</constant>&nbsp;</entry>
<entry>enum&nbsp;v4l2_mpeg_video_h264_level</entry>
</row>
@ -1641,7 +1641,7 @@ Possible values are:</entry>
</row>
<row><entry></entry></row>
<row>
<row id="v4l2-mpeg-video-mpeg4-level">
<entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL</constant>&nbsp;</entry>
<entry>enum&nbsp;v4l2_mpeg_video_mpeg4_level</entry>
</row>
@ -1689,9 +1689,9 @@ Possible values are:</entry>
</row>
<row><entry></entry></row>
<row>
<row id="v4l2-mpeg-video-h264-profile">
<entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_PROFILE</constant>&nbsp;</entry>
<entry>enum&nbsp;v4l2_mpeg_h264_profile</entry>
<entry>enum&nbsp;v4l2_mpeg_video_h264_profile</entry>
</row>
<row><entry spanname="descr">The profile information for H264.
Applicable to the H264 encoder.
@ -1774,9 +1774,9 @@ Possible values are:</entry>
</row>
<row><entry></entry></row>
<row>
<row id="v4l2-mpeg-video-mpeg4-profile">
<entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE</constant>&nbsp;</entry>
<entry>enum&nbsp;v4l2_mpeg_mpeg4_profile</entry>
<entry>enum&nbsp;v4l2_mpeg_video_mpeg4_profile</entry>
</row>
<row><entry spanname="descr">The profile information for MPEG4.
Applicable to the MPEG4 encoder.
@ -1820,9 +1820,9 @@ Applicable to the encoder.
</row>
<row><entry></entry></row>
<row>
<row id="v4l2-mpeg-video-multi-slice-mode">
<entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE</constant>&nbsp;</entry>
<entry>enum&nbsp;v4l2_mpeg_multi_slice_mode</entry>
<entry>enum&nbsp;v4l2_mpeg_video_multi_slice_mode</entry>
</row>
<row><entry spanname="descr">Determines how the encoder should handle division of frame into slices.
Applicable to the encoder.
@ -1868,9 +1868,9 @@ Applicable to the encoder.</entry>
</row>
<row><entry></entry></row>
<row>
<row id="v4l2-mpeg-video-h264-loop-filter-mode">
<entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE</constant>&nbsp;</entry>
<entry>enum&nbsp;v4l2_mpeg_h264_loop_filter_mode</entry>
<entry>enum&nbsp;v4l2_mpeg_video_h264_loop_filter_mode</entry>
</row>
<row><entry spanname="descr">Loop filter mode for H264 encoder.
Possible values are:</entry>
@ -1913,9 +1913,9 @@ Applicable to the H264 encoder.</entry>
</row>
<row><entry></entry></row>
<row>
<row id="v4l2-mpeg-video-h264-entropy-mode">
<entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE</constant>&nbsp;</entry>
<entry>enum&nbsp;v4l2_mpeg_h264_symbol_mode</entry>
<entry>enum&nbsp;v4l2_mpeg_video_h264_entropy_mode</entry>
</row>
<row><entry spanname="descr">Entropy coding mode for H264 - CABAC/CAVLC.
Applicable to the H264 encoder.
@ -2140,9 +2140,9 @@ previous frames. Applicable to the H264 encoder.</entry>
</row>
<row><entry></entry></row>
<row>
<row id="v4l2-mpeg-video-header-mode">
<entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_HEADER_MODE</constant>&nbsp;</entry>
<entry>enum&nbsp;v4l2_mpeg_header_mode</entry>
<entry>enum&nbsp;v4l2_mpeg_video_header_mode</entry>
</row>
<row><entry spanname="descr">Determines whether the header is returned as the first buffer or is
it returned together with the first frame. Applicable to encoders.
@ -2320,9 +2320,9 @@ Valid only when H.264 and macroblock level RC is enabled (<constant>V4L2_CID_MPE
Applicable to the H264 encoder.</entry>
</row>
<row><entry></entry></row>
<row>
<row id="v4l2-mpeg-mfc51-video-frame-skip-mode">
<entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE</constant>&nbsp;</entry>
<entry>enum&nbsp;v4l2_mpeg_mfc51_frame_skip_mode</entry>
<entry>enum&nbsp;v4l2_mpeg_mfc51_video_frame_skip_mode</entry>
</row>
<row><entry spanname="descr">
Indicates under what conditions the encoder should skip frames. If encoding a frame would cause the encoded stream to be larger than
@ -2361,9 +2361,9 @@ the stream will meet tight bandwidth contraints. Applicable to encoders.
</entry>
</row>
<row><entry></entry></row>
<row>
<row id="v4l2-mpeg-mfc51-video-force-frame-type">
<entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE</constant>&nbsp;</entry>
<entry>enum&nbsp;v4l2_mpeg_mfc51_force_frame_type</entry>
<entry>enum&nbsp;v4l2_mpeg_mfc51_video_force_frame_type</entry>
</row>
<row><entry spanname="descr">Force a frame type for the next queued buffer. Applicable to encoders.
Possible values are:</entry>


@ -45,7 +45,7 @@ arrived in memory (this becomes more likely with devices behind PCI-PCI
bridges). In order to ensure that all the data has arrived in memory,
the interrupt handler must read a register on the device which raised
the interrupt. PCI transaction ordering rules require that all the data
arrives in memory before the value can be returned from the register.
arrive in memory before the value may be returned from the register.
Using MSIs avoids this problem as the interrupt-generating write cannot
pass the data writes, so by the time the interrupt is raised, the driver
knows that all the data has arrived in memory.
@ -86,13 +86,13 @@ device.
int pci_enable_msi(struct pci_dev *dev)
A successful call will allocate ONE interrupt to the device, regardless
of how many MSIs the device supports. The device will be switched from
A successful call allocates ONE interrupt to the device, regardless
of how many MSIs the device supports. The device is switched from
pin-based interrupt mode to MSI mode. The dev->irq number is changed
to a new number which represents the message signaled interrupt.
This function should be called before the driver calls request_irq()
since enabling MSIs disables the pin-based IRQ and the driver will not
receive interrupts on the old interrupt.
to a new number which represents the message signaled interrupt;
consequently, this function should be called before the driver calls
request_irq(), because an MSI is delivered via a vector that is
different from the vector of a pin-based interrupt.
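A minimal sketch of this ordering follows (the foo_* names and the handler are
illustrative, not part of this document):

  #include <linux/interrupt.h>
  #include <linux/pci.h>

  static irqreturn_t foo_interrupt(int irq, void *data)
  {
          /* read a device register to flush posted writes, then handle */
          return IRQ_HANDLED;
  }

  static int foo_setup_irq(struct pci_dev *pdev, void *foo_priv)
  {
          int rc;

          rc = pci_enable_msi(pdev);      /* must precede request_irq() */
          if (rc)
                  return rc;              /* driver may fall back to INTx */

          rc = request_irq(pdev->irq, foo_interrupt, 0, "foo", foo_priv);
          if (rc)
                  pci_disable_msi(pdev);
          return rc;
  }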
4.2.2 pci_enable_msi_block
@ -111,20 +111,20 @@ the device are in the range dev->irq to dev->irq + count - 1.
If this function returns a negative number, it indicates an error and
the driver should not attempt to request any more MSI interrupts for
this device. If this function returns a positive number, it will be
less than 'count' and indicate the number of interrupts that could have
been allocated. In neither case will the irq value have been
updated, nor will the device have been switched into MSI mode.
this device. If this function returns a positive number, it is
less than 'count' and indicates the number of interrupts that could have
been allocated. In neither case is the irq value updated or the device
switched into MSI mode.
The device driver must decide what action to take if
pci_enable_msi_block() returns a value less than the number asked for.
Some devices can make use of fewer interrupts than the maximum they
request; in this case the driver should call pci_enable_msi_block()
pci_enable_msi_block() returns a value less than the number requested.
For instance, the driver could still make use of fewer interrupts;
in this case the driver should call pci_enable_msi_block()
again. Note that it is not guaranteed to succeed, even when the
'count' has been reduced to the value returned from a previous call to
pci_enable_msi_block(). This is because there are multiple constraints
on the number of vectors that can be allocated; pci_enable_msi_block()
will return as soon as it finds any constraint that doesn't allow the
returns as soon as it finds any constraint that doesn't allow the
call to succeed.
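One possible retry pattern for this behaviour is sketched below (the helper
name is made up; only pci_enable_msi_block() is from this document):

  #include <linux/errno.h>
  #include <linux/pci.h>

  /* Returns the number of MSI vectors obtained, or a negative errno. */
  static int foo_enable_some_msi(struct pci_dev *pdev, int nvec)
  {
          int rc;

          while (nvec >= 1) {
                  rc = pci_enable_msi_block(pdev, nvec);
                  if (rc == 0)
                          return nvec;    /* got all 'nvec' vectors */
                  if (rc < 0)
                          return rc;      /* error: give up on MSI */
                  nvec = rc;              /* positive: retry with fewer vectors */
          }
          return -ENOSPC;
  }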
4.2.3 pci_disable_msi
@ -137,10 +137,10 @@ interrupt number and frees the previously allocated message signaled
interrupt(s). The interrupt may subsequently be assigned to another
device, so drivers should not cache the value of dev->irq.
A device driver must always call free_irq() on the interrupt(s)
for which it has called request_irq() before calling this function.
Failure to do so will result in a BUG_ON(), the device will be left with
MSI enabled and will leak its vector.
Before calling this function, a device driver must always call free_irq()
on any interrupt for which it previously called request_irq().
Failure to do so results in a BUG_ON(), leaving the device with
MSI enabled and thus leaking its vector.
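A sketch of the required teardown order (names are illustrative):

  #include <linux/interrupt.h>
  #include <linux/pci.h>

  static void foo_teardown_irq(struct pci_dev *pdev, void *foo_priv)
  {
          free_irq(pdev->irq, foo_priv);  /* must come first */
          pci_disable_msi(pdev);          /* BUG_ON()s if the IRQ is still requested */
  }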
4.3 Using MSI-X
@ -155,10 +155,10 @@ struct msix_entry {
};
This allows for the device to use these interrupts in a sparse fashion;
for example it could use interrupts 3 and 1027 and allocate only a
for example, it could use interrupts 3 and 1027 and yet allocate only a
two-element array. The driver is expected to fill in the 'entry' value
in each element of the array to indicate which entries it wants the kernel
to assign interrupts for. It is invalid to fill in two entries with the
in each element of the array to indicate for which entries the kernel
should assign interrupts; it is invalid to fill in two entries with the
same number.
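A sketch of the sparse allocation described above (the foo_* names are
illustrative):

  #include <linux/pci.h>

  static struct msix_entry foo_msix[2];

  static int foo_enable_sparse_msix(struct pci_dev *pdev)
  {
          foo_msix[0].entry = 3;          /* MSI-X table entries the driver wants; */
          foo_msix[1].entry = 1027;       /* no two elements may use the same number */

          /* on success, foo_msix[i].vector holds the IRQ for request_irq() */
          return pci_enable_msix(pdev, foo_msix, 2);
  }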
4.3.1 pci_enable_msix
@ -168,10 +168,11 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
Calling this function asks the PCI subsystem to allocate 'nvec' MSIs.
The 'entries' argument is a pointer to an array of msix_entry structs
which should be at least 'nvec' entries in size. On success, the
function will return 0 and the device will have been switched into
MSI-X interrupt mode. The 'vector' elements in each entry will have
been filled in with the interrupt number. The driver should then call
request_irq() for each 'vector' that it decides to use.
device is switched into MSI-X mode and the function returns 0.
The 'vector' member in each entry is populated with the interrupt number;
the driver should then call request_irq() for each 'vector' that it
decides to use. The device driver is responsible for keeping track of the
interrupts assigned to the MSI-X vectors so it can free them again later.
If this function returns a negative number, it indicates an error and
the driver should not attempt to allocate any more MSI-X interrupts for
@ -181,16 +182,14 @@ below.
This function, in contrast with pci_enable_msi(), does not adjust
dev->irq. The device will not generate interrupts for this interrupt
number once MSI-X is enabled. The device driver is responsible for
keeping track of the interrupts assigned to the MSI-X vectors so it can
free them again later.
number once MSI-X is enabled.
Device drivers should normally call this function once per device
during the initialization phase.
It is ideal if drivers can cope with a variable number of MSI-X interrupts,
It is ideal if drivers can cope with a variable number of MSI-X interrupts;
there are many reasons why the platform may not be able to provide the
exact number a driver asks for.
exact number that a driver asks for.
A request loop to achieve that might look like:
@ -212,15 +211,15 @@ static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
void pci_disable_msix(struct pci_dev *dev)
This API should be used to undo the effect of pci_enable_msix(). It frees
This function should be used to undo the effect of pci_enable_msix(). It frees
the previously allocated message signaled interrupts. The interrupts may
subsequently be assigned to another device, so drivers should not cache
the value of the 'vector' elements over a call to pci_disable_msix().
A device driver must always call free_irq() on the interrupt(s)
for which it has called request_irq() before calling this function.
Failure to do so will result in a BUG_ON(), the device will be left with
MSI enabled and will leak its vector.
Before calling this function, a device driver must always call free_irq()
on any interrupt for which it previously called request_irq().
Failure to do so results in a BUG_ON(), leaving the device with
MSI-X enabled and thus leaking its vector.
4.3.3 The MSI-X Table
@ -232,10 +231,10 @@ mask or unmask an interrupt, it should call disable_irq() / enable_irq().
4.4 Handling devices implementing both MSI and MSI-X capabilities
If a device implements both MSI and MSI-X capabilities, it can
run in either MSI mode or MSI-X mode but not both simultaneously.
run in either MSI mode or MSI-X mode, but not both simultaneously.
This is a requirement of the PCI spec, and it is enforced by the
PCI layer. Calling pci_enable_msi() when MSI-X is already enabled or
pci_enable_msix() when MSI is already enabled will result in an error.
pci_enable_msix() when MSI is already enabled results in an error.
If a device driver wishes to switch between MSI and MSI-X at runtime,
it must first quiesce the device, then switch it back to pin-interrupt
mode, before calling pci_enable_msi() or pci_enable_msix() and resuming
@ -251,7 +250,7 @@ the MSI-X facilities in preference to the MSI facilities. As mentioned
above, MSI-X supports any number of interrupts between 1 and 2048.
In contrast, MSI is restricted to a maximum of 32 interrupts (and
must be a power of two). In addition, the MSI interrupt vectors must
be allocated consecutively, so the system may not be able to allocate
be allocated consecutively, so the system might not be able to allocate
as many vectors for MSI as it could for MSI-X. On some platforms, MSI
interrupts must all be targeted at the same set of CPUs whereas MSI-X
interrupts can all be targeted at different CPUs.
@ -281,7 +280,7 @@ disabled to enabled and back again.
Using 'lspci -v' (as root) may show some devices with "MSI", "Message
Signalled Interrupts" or "MSI-X" capabilities. Each of these capabilities
has an 'Enable' flag which will be followed with either "+" (enabled)
has an 'Enable' flag which is followed with either "+" (enabled)
or "-" (disabled).
@ -298,7 +297,7 @@ The PCI stack provides three ways to disable MSIs:
Some host chipsets simply don't support MSIs properly. If we're
lucky, the manufacturer knows this and has indicated it in the ACPI
FADT table. In this case, Linux will automatically disable MSIs.
FADT table. In this case, Linux automatically disables MSIs.
Some boards don't include this information in the table and so we have
to detect them ourselves. The complete list of these is found near the
quirk_disable_all_msi() function in drivers/pci/quirks.c.
@ -317,7 +316,7 @@ Some bridges allow you to enable MSIs by changing some bits in their
PCI configuration space (especially the Hypertransport chipsets such
as the nVidia nForce and Serverworks HT2000). As with host chipsets,
Linux mostly knows about them and automatically enables MSIs if it can.
If you have a bridge which Linux doesn't yet know about, you can enable
If you have a bridge unknown to Linux, you can enable
MSIs in configuration space using whatever method you know works, then
enable MSIs on that bridge by doing:
@ -327,7 +326,7 @@ where $bridge is the PCI address of the bridge you've enabled (eg
0000:00:0e.0).
To disable MSIs, echo 0 instead of 1. Changing this value should be
done with caution as it can break interrupt handling for all devices
done with caution as it could break interrupt handling for all devices
below this bridge.
Again, please notify linux-pci@vger.kernel.org of any bridges that need
@ -336,7 +335,7 @@ special handling.
5.3. Disabling MSIs on a single device
Some devices are known to have faulty MSI implementations. Usually this
is handled in the individual device driver but occasionally it's necessary
is handled in the individual device driver, but occasionally it's necessary
to handle this with a quirk. Some drivers have an option to disable use
of MSI. While this is a convenient workaround for the driver author,
it is not good practice and should not be emulated.
@ -350,7 +349,7 @@ for your machine. You should also check your .config to be sure you
have enabled CONFIG_PCI_MSI.
Then, 'lspci -t' gives the list of bridges above a device. Reading
/sys/bus/pci/devices/*/msi_bus will tell you whether MSI are enabled (1)
/sys/bus/pci/devices/*/msi_bus will tell you whether MSIs are enabled (1)
or disabled (0). If 0 is found in any of the msi_bus files belonging
to bridges between the PCI root and the device, MSIs are disabled.


@ -130,7 +130,7 @@ Linux kernel master tree:
ftp.??.kernel.org:/pub/linux/kernel/...
?? == your country code, such as "us", "uk", "fr", etc.
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git
http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git
Linux kernel mailing list:
linux-kernel@vger.kernel.org


@ -303,7 +303,7 @@ patches that are being emailed around.
The sign-off is a simple line at the end of the explanation for the
patch, which certifies that you wrote it or otherwise have the right to
pass it on as a open-source patch. The rules are pretty simple: if you
pass it on as an open-source patch. The rules are pretty simple: if you
can certify the below:
Developer's Certificate of Origin 1.1


@ -43,3 +43,74 @@ If one sets slice_idle=0 and if storage supports NCQ, CFQ internally switches
to IOPS mode and starts providing fairness in terms of number of requests
dispatched. Note that this mode switching takes effect only for group
scheduling. For non-cgroup users nothing should change.
CFQ IO scheduler Idling Theory
===============================
Idling on a queue is primarily about waiting for the next request to arrive
on the same queue after a request completes. While idling, CFQ will not
dispatch requests from other cfq queues even if requests are pending there.
The rationale behind idling is that it can cut down on the number of seeks
on rotational media. For example, if a process is doing dependent
sequential reads (the next read is issued only after the previous one
completes), then not dispatching requests from other queues helps because
the disk head does not move and sequential IO keeps being dispatched from
one queue.
CFQ has the following service trees, and the various queues are placed on them.
sync-idle sync-noidle async
All cfq queues doing synchronous sequential IO go on the sync-idle tree.
On this tree we idle on each queue individually.
All synchronous non-sequential queues go on the sync-noidle tree. Also, any
requests marked with REQ_NOIDLE go on this service tree. On this tree we do
not idle on individual queues; instead, we idle on the whole group of queues
(the tree). So if there are 4 queues waiting to dispatch IO, we will idle
only once the last queue has dispatched its IO and there is no more IO on
this service tree.
All async writes go on the async service tree. There is no idling on async
queues.
CFQ has some optimizations for SSDs: if it detects non-rotational media
that supports a higher queue depth (multiple requests in flight at a time),
it cuts down on idling of individual queues; all the queues move to the
sync-noidle tree and only tree idling remains. This tree idling provides
isolation from the buffered write queues on the async tree.
FAQ
===
Q1. Why idle at all on queues marked with REQ_NOIDLE?
A1. We only do tree idle (all queues on the sync-noidle tree) on queues marked
with REQ_NOIDLE. This helps in providing isolation from all the sync-idle
queues. Otherwise, in the presence of many sequential readers, other
synchronous IO might not get its fair share of the disk.
For example, suppose there are 10 sequential readers doing IO and each gets
100ms. If a REQ_NOIDLE request comes in, it will be scheduled roughly one
second later. If we do not idle after the REQ_NOIDLE request completes, and
a couple of milliseconds later another REQ_NOIDLE request comes in, it will
again be scheduled a second later. Repeat this and notice how a workload can
lose its disk share and suffer due to multiple sequential readers.
fsync can generate dependent IO where a bunch of data is written in the
context of fsync, and later some journaling data is written. Journaling
data comes in only after fsync has finished its IO (at least for ext4 that
seemed to be the case). Now if one decides not to idle on the fsync thread
due to REQ_NOIDLE, then the next journaling write will not get scheduled
for another second. A process doing small fsyncs will suffer badly in the
presence of multiple sequential readers.
Hence, doing tree idling on threads that use the REQ_NOIDLE flag on requests
provides isolation from multiple sequential readers while still not idling
on individual threads.
Q2. When should REQ_NOIDLE be specified?
A2. Whenever one is doing a synchronous write and does not expect more writes
to be dispatched from the same context soon, one should be able to specify
REQ_NOIDLE on the write, and that should work well for most cases.
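As a rough illustration of A2 (a sketch against the 3.x block-layer API; the
helper name is made up and the bio setup is elided):

  #include <linux/bio.h>
  #include <linux/fs.h>

  /* Synchronous write with a hint that no more IO follows from this context. */
  static void foo_submit_noidle_write(struct bio *bio)
  {
          submit_bio(WRITE | REQ_SYNC | REQ_NOIDLE, bio);
  }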


@ -199,18 +199,16 @@ to coerce it into behaving.
To beat some sense out of the internal editor, do this:
- Under account settings, composition and addressing, uncheck "Compose
messages in HTML format".
- Edit your Thunderbird config settings so that it won't use format=flowed.
Go to "edit->preferences->advanced->config editor" to bring up the
thunderbird's registry editor, and set "mailnews.send_plaintext_flowed" to
"false".
- Enable "preformat" mode: Shft-click on the Write icon to bring up the HTML
composer, select "Preformat" from the drop-down box just under the subject
line, then close the message without saving. (This setting also applies to
the text composer, but the only control for it is in the HTML composer.)
- Disable HTML Format: Set "mail.identity.id1.compose_html" to "false".
- Enable "preformat" mode: Set "editor.quotesPreformatted" to "true".
- Enable UTF8: Set "prefs.converted-to-utf8" to "true".
- Install the "toggle wordwrap" extension. Download the file from:
https://addons.mozilla.org/thunderbird/addon/2351/


@ -592,3 +592,11 @@ Why: In 3.0, we can now autodetect internal 3G device and already have
interface that was used by the acer-wmi driver. It will be replaced by
an informational log message when acer-wmi initializes.
Who: Lee, Chun-Yi <jlee@novell.com>
----------------------------
What: The XFS nodelaylog mount option
When: 3.3
Why: The delaylog mode that has been the default since 2.6.39 has proven
stable, and the old code is in the way of additional improvements in
the log code.
Who: Christoph Hellwig <hch@lst.de>


@ -27,7 +27,7 @@ His original code can still be found at:
Does anyone know of a more current email address for Makoto? He doesn't
respond to the address given above...
Current maintainer: Sergey S. Kostyliov <rathamahata@php4.ru>
This filesystem doesn't have a maintainer.
WHAT IS THIS DRIVER?
==================


@ -62,6 +62,13 @@ can be safely used to identify the chip. You will have to instantiate
the devices explicitly. Please see Documentation/i2c/instantiating-devices for
details.
WARNING: Do not access chip registers using the i2cdump command, and do not use
any of the i2ctools commands on a command register (0xa5 to 0xac). The chips
supported by this driver interpret any access to a command register (including
read commands) as a request to execute the command in question. This may result in
power loss, board resets, and/or Flash corruption. Worst case, your board may
turn into a brick.
Sysfs entries
-------------


@ -319,4 +319,6 @@ Code Seq#(hex) Include File Comments
<mailto:thomas@winischhofer.net>
0xF4 00-1F video/mbxfb.h mbxfb
<mailto:raph@8d.com>
0xF6 all LTTng Linux Trace Toolkit Next Generation
<mailto:mathieu.desnoyers@efficios.com>
0xFD all linux/dm-ioctl.h


@ -620,17 +620,6 @@
(including this document itself) have been moved there, and might
be more up to date than the web version.
* Name: "Linux Source Driver"
URL: http://lsd.linux.cz
Keywords: Browsing source code.
Description: "Linux Source Driver (LSD) is an application, which
can make browsing source codes of Linux kernel easier than you can
imagine. You can select between multiple versions of kernel (e.g.
0.01, 1.0.0, 2.0.33, 2.0.34pre13, 2.0.0, 2.1.101 etc.). With LSD
you can search Linux kernel (fulltext, macros, types, functions
and variables) and LSD can generate patches for you on the fly
(files, directories or kernel)".
* Name: "Linux Kernel Source Reference"
Author: Thomas Graichen.
URL: http://marc.info/?l=linux-kernel&m=96446640102205&w=4


@ -40,6 +40,7 @@ parameter is applicable:
ALSA ALSA sound support is enabled.
APIC APIC support is enabled.
APM Advanced Power Management support is enabled.
ARM ARM architecture is enabled.
AVR32 AVR32 architecture is enabled.
AX25 Appropriate AX.25 support is enabled.
BLACKFIN Blackfin architecture is enabled.
@ -49,6 +50,7 @@ parameter is applicable:
EFI EFI Partitioning (GPT) is enabled
EIDE EIDE/ATAPI support is enabled.
FB The frame buffer device is enabled.
FTRACE Function tracing enabled.
GCOV GCOV profiling is enabled.
HW Appropriate hardware is enabled.
IA-64 IA-64 architecture is enabled.
@ -69,6 +71,7 @@ parameter is applicable:
Documentation/m68k/kernel-options.txt.
MCA MCA bus support is enabled.
MDA MDA console support is enabled.
MIPS MIPS architecture is enabled.
MOUSE Appropriate mouse support is enabled.
MSI Message Signaled Interrupts (PCI).
MTD MTD (Memory Technology Device) support is enabled.
@ -100,7 +103,6 @@ parameter is applicable:
SPARC Sparc architecture is enabled.
SWSUSP Software suspend (hibernation) is enabled.
SUSPEND System suspend states are enabled.
FTRACE Function tracing enabled.
TPM TPM drivers are enabled.
TS Appropriate touchscreen support is enabled.
UMS USB Mass Storage support is enabled.
@ -115,7 +117,7 @@ parameter is applicable:
X86-64 X86-64 architecture is enabled.
More X86-64 boot options can be found in
Documentation/x86/x86_64/boot-options.txt .
X86 Either 32bit or 64bit x86 (same as X86-32+X86-64)
X86 Either 32-bit or 64-bit x86 (same as X86-32+X86-64)
XEN Xen support is enabled
In addition, the following text indicates that the option:
@ -376,7 +378,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
atkbd.softrepeat= [HW]
Use software keyboard repeat
autotest [IA64]
autotest [IA-64]
baycom_epp= [HW,AX25]
Format: <io>,<mode>
@ -681,8 +683,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
uart[8250],mmio32,<addr>[,options]
Start an early, polled-mode console on the 8250/16550
UART at the specified I/O port or MMIO address.
MMIO inter-register address stride is either 8bit (mmio)
or 32bit (mmio32).
MMIO inter-register address stride is either 8-bit
(mmio) or 32-bit (mmio32).
The options are the same as for ttyS, above.
earlyprintk= [X86,SH,BLACKFIN]
@ -725,7 +727,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
See Documentation/block/as-iosched.txt and
Documentation/block/deadline-iosched.txt for details.
elfcorehdr= [IA64,PPC,SH,X86]
elfcorehdr= [IA-64,PPC,SH,X86]
Specifies physical address of start of kernel core
image elf header. Generally kexec loader will
pass this option to capture kernel.
@ -791,7 +793,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
tracer at boot up. function-list is a comma separated
list of functions. This list can be changed at run
time by the set_ftrace_filter file in the debugfs
tracing directory.
tracing directory.
ftrace_notrace=[function-list]
[FTRACE] Do not trace the functions specified in
@ -829,7 +831,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
hashdist= [KNL,NUMA] Large hashes allocated during boot
are distributed across NUMA nodes. Defaults on
for 64bit NUMA, off otherwise.
for 64-bit NUMA, off otherwise.
Format: 0 | 1 (for off | on)
hcl= [IA-64] SGI's Hardware Graph compatibility layer
@ -998,10 +1000,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
DMA.
forcedac [x86_64]
With this option iommu will not optimize to look
for io virtual address below 32 bit forcing dual
for io virtual address below 32-bit forcing dual
address cycle on pci bus for cards supporting greater
than 32 bit addressing. The default is to look
for translation below 32 bit and if not available
than 32-bit addressing. The default is to look
for translation below 32-bit and if not available
then look in the higher range.
strict [Default Off]
With this option on every unmap_single operation will
@ -1017,7 +1019,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
off disable Interrupt Remapping
nosid disable Source ID checking
inttest= [IA64]
inttest= [IA-64]
iomem= Disable strict checking of access to MMIO memory
strict regions from userspace.
@ -1034,7 +1036,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
nomerge
forcesac
soft
pt [x86, IA64]
pt [x86, IA-64]
io7= [HW] IO7 for Marvel based alpha systems
See comment before marvel_specify_io7 in
@ -1165,7 +1167,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
kvm-amd.npt= [KVM,AMD] Disable nested paging (virtualized MMU)
for all guests.
Default is 1 (enabled) if in 64bit or 32bit-PAE mode
Default is 1 (enabled) if in 64-bit or 32-bit PAE mode.
kvm-intel.ept= [KVM,Intel] Disable extended page tables
(virtualized MMU) support on capable Intel chips.
@ -1202,10 +1204,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
libata.dma=0 Disable all PATA and SATA DMA
libata.dma=1 PATA and SATA Disk DMA only
libata.dma=2 ATAPI (CDROM) DMA only
libata.dma=4 Compact Flash DMA only
libata.dma=4 Compact Flash DMA only
Combinations also work, so libata.dma=3 enables DMA
for disks and CDROMs, but not CFs.
libata.ignore_hpa= [LIBATA] Ignore HPA limit
libata.ignore_hpa=0 keep BIOS limits (default)
libata.ignore_hpa=1 ignore limits, using full disk
@ -1331,7 +1333,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
ltpc= [NET]
Format: <io>,<irq>,<dma>
machvec= [IA64] Force the use of a particular machine-vector
machvec= [IA-64] Force the use of a particular machine-vector
(machvec) in a generic kernel.
Example: machvec=hpzx1_swiotlb
@ -1348,9 +1350,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
it is equivalent to "nosmp", which also disables
the IO APIC.
max_loop= [LOOP] Maximum number of loopback devices that can
be mounted
Format: <1-256>
max_loop= [LOOP] The number of loop block devices that get
(loop.max_loop) unconditionally pre-created at init time. The default
number is configured by BLK_DEV_LOOP_MIN_COUNT. Instead
of statically allocating a predefined number, loop
devices can be requested on-demand with the
/dev/loop-control interface.
mcatest= [IA-64]
@ -1734,7 +1739,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
nointroute [IA-64]
nojitter [IA64] Disables jitter checking for ITC timers.
nojitter [IA-64] Disables jitter checking for ITC timers.
no-kvmclock [X86,KVM] Disable paravirtualized KVM clock driver
@ -1800,7 +1805,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
nox2apic [X86-64,APIC] Do not enable x2APIC mode.
nptcg= [IA64] Override max number of concurrent global TLB
nptcg= [IA-64] Override max number of concurrent global TLB
purges which is reported from either PAL_VM_SUMMARY or
SAL PALO.
@ -2077,13 +2082,16 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Format: { parport<nr> | timid | 0 }
See also Documentation/parport.txt.
pmtmr= [X86] Manual setup of pmtmr I/O Port.
pmtmr= [X86] Manual setup of pmtmr I/O Port.
Override pmtimer IOPort with a hex value.
e.g. pmtmr=0x508
pnp.debug [PNP]
Enable PNP debug messages. This depends on the
CONFIG_PNP_DEBUG_MESSAGES option.
pnp.debug=1 [PNP]
Enable PNP debug messages (depends on the
CONFIG_PNP_DEBUG_MESSAGES option). Change at run-time
via /sys/module/pnp/parameters/debug. We always show
current resource usage; turning this on also shows
possible settings and some assignment information.
pnpacpi= [ACPI]
{ off }
@ -2635,6 +2643,16 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
medium is write-protected).
Example: quirks=0419:aaf5:rl,0421:0433:rc
user_debug= [KNL,ARM]
Format: <int>
See arch/arm/Kconfig.debug help text.
1 - undefined instruction events
2 - system calls
4 - invalid data aborts
8 - SIGSEGV faults
16 - SIGBUS faults
Example: user_debug=31
userpte=
[X86] Flags controlling user PTE allocations.
@ -2680,6 +2698,27 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
vmpoff= [KNL,S390] Perform z/VM CP command after power off.
Format: <command>
vsyscall= [X86-64]
Controls the behavior of vsyscalls (i.e. calls to
fixed addresses of 0xffffffffff600x00 from legacy
code). Most statically-linked binaries and older
versions of glibc use these calls. Because these
functions are at fixed addresses, they make nice
targets for exploits that can control RIP.
emulate [default] Vsyscalls turn into traps and are
emulated reasonably safely.
native Vsyscalls are native syscall instructions.
This is a little bit faster than trapping
and makes a few dynamic recompilers work
better than they would in emulation mode.
It also makes exploits much easier to write.
none Vsyscalls don't work at all. This makes
them quite hard to use for exploits but
might break your system.
vt.cur_default= [VT] Default cursor shape.
Format: 0xCCBBAA, where AA, BB, and CC are the same as
the parameters of the <Esc>[?A;B;Cc escape sequence;

View File

@ -1,13 +1,21 @@
00-INDEX
- this file
3c359.txt
- information on the 3Com TokenLink Velocity XL (3c359) driver.
3c505.txt
- information on the 3Com EtherLink Plus (3c505) driver.
3c509.txt
- information on the 3Com Etherlink III Series Ethernet cards.
6pack.txt
- info on the 6pack protocol, an alternative to KISS for AX.25
DLINK.txt
- info on the D-Link DE-600/DE-620 parallel port pocket adapters
PLIP.txt
- PLIP: The Parallel Line Internet Protocol device driver
README.ipw2100
- README for the Intel PRO/Wireless 2100 driver.
README.ipw2200
- README for the Intel PRO/Wireless 2915ABG and 2200BG driver.
README.sb1000
- info on General Instrument/NextLevel SURFboard1000 cable modem.
alias.txt
@ -20,8 +28,12 @@ atm.txt
- info on where to get ATM programs and support for Linux.
ax25.txt
- info on using AX.25 and NET/ROM code for Linux
batman-adv.txt
- B.A.T.M.A.N routing protocol on top of layer 2 Ethernet Frames.
baycom.txt
- info on the driver for Baycom style amateur radio modems
bonding.txt
- Linux Ethernet Bonding Driver HOWTO: link aggregation in Linux.
bridge.txt
- where to get user space programs for ethernet bridging with Linux.
can.txt
@ -34,32 +46,60 @@ cxacru.txt
- Conexant AccessRunner USB ADSL Modem
cxacru-cf.py
- Conexant AccessRunner USB ADSL Modem configuration file parser
cxgb.txt
- Release Notes for the Chelsio N210 Linux device driver.
dccp.txt
- the Datagram Congestion Control Protocol (DCCP) (RFC 4340..42).
de4x5.txt
- the Digital EtherWORKS DE4?? and DE5?? PCI Ethernet driver
decnet.txt
- info on using the DECnet networking layer in Linux.
depca.txt
- the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver
dl2k.txt
- README for D-Link DL2000-based Gigabit Ethernet Adapters (dl2k.ko).
dm9000.txt
- README for the Simtec DM9000 Network driver.
dmfe.txt
- info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver.
dns_resolver.txt
- The DNS resolver module allows kernel services to make DNS queries.
driver.txt
- Softnet driver issues.
e100.txt
- info on Intel's EtherExpress PRO/100 line of 10/100 boards
e1000.txt
- info on Intel's E1000 line of gigabit ethernet boards
e1000e.txt
- README for the Intel Gigabit Ethernet Driver (e1000e).
eql.txt
- serial IP load balancing
ewrk3.txt
- the Digital EtherWORKS 3 DE203/4/5 Ethernet driver
fib_trie.txt
- Level Compressed Trie (LC-trie) notes: a structure for routing.
filter.txt
- Linux Socket Filtering
fore200e.txt
- FORE Systems PCA-200E/SBA-200E ATM NIC driver info.
framerelay.txt
- info on using Frame Relay/Data Link Connection Identifier (DLCI).
gen_stats.txt
- Generic networking statistics for netlink users.
generic_hdlc.txt
- The generic High Level Data Link Control (HDLC) layer.
generic_netlink.txt
- info on Generic Netlink
gianfar.txt
- Gianfar Ethernet Driver.
ieee802154.txt
- Linux IEEE 802.15.4 implementation, API and drivers
ifenslave.c
- Configure network interfaces for parallel routing (bonding).
igb.txt
- README for the Intel Gigabit Ethernet Driver (igb).
igbvf.txt
- README for the Intel Gigabit Ethernet Driver (igbvf).
ip-sysctl.txt
- /proc/sys/net/ipv4/* variables
ip_dynaddr.txt
@ -68,41 +108,117 @@ ipddp.txt
- AppleTalk-IP Decapsulation and AppleTalk-IP Encapsulation
iphase.txt
- Interphase PCI ATM (i)Chip IA Linux driver info.
ipv6.txt
- Options to the ipv6 kernel module.
ipvs-sysctl.txt
- Per-inode explanation of the /proc/sys/net/ipv4/vs interface.
irda.txt
- where to get IrDA (infrared) utilities and info for Linux.
ixgb.txt
- README for the Intel 10 Gigabit Ethernet Driver (ixgb).
ixgbe.txt
- README for the Intel 10 Gigabit Ethernet Driver (ixgbe).
ixgbevf.txt
- README for the Intel Virtual Function (VF) Driver (ixgbevf).
l2tp.txt
- User guide to the L2TP tunnel protocol.
lapb-module.txt
- programming information of the LAPB module.
ltpc.txt
- the Apple or Farallon LocalTalk PC card driver
mac80211-injection.txt
- HOWTO use packet injection with mac80211
multicast.txt
- Behaviour of cards under Multicast
multiqueue.txt
- HOWTO for multiqueue network device support.
netconsole.txt
- The network console module netconsole.ko: configuration and notes.
netdev-features.txt
- Network interface features API description.
netdevices.txt
- info on network device driver functions exported to the kernel.
netif-msg.txt
- Design of the network interface message level setting (NETIF_MSG_*).
nfc.txt
- The Linux Near Field Communication (NFC) subsystem.
olympic.txt
- IBM PCI Pit/Pit-Phy/Olympic Token Ring driver info.
operstates.txt
- Overview of network interface operational states.
packet_mmap.txt
- User guide to memory mapped packet socket rings (PACKET_[RT]X_RING).
phonet.txt
- The Phonet packet protocol used in Nokia cellular modems.
phy.txt
- The PHY abstraction layer.
pktgen.txt
- User guide to the kernel packet generator (pktgen.ko).
policy-routing.txt
- IP policy-based routing
ppp_generic.txt
- Information about the generic PPP driver.
proc_net_tcp.txt
- Per inode overview of the /proc/net/tcp and /proc/net/tcp6 interfaces.
radiotap-headers.txt
- Background on radiotap headers.
ray_cs.txt
- Raylink Wireless LAN card driver info.
rds.txt
- Background on the reliable, ordered datagram delivery method RDS.
regulatory.txt
- Overview of the Linux wireless regulatory infrastructure.
rxrpc.txt
- Guide to the RxRPC protocol.
s2io.txt
- Release notes for Neterion Xframe I/II 10GbE driver.
scaling.txt
- Explanation of network scaling techniques: RSS, RPS, RFS, aRFS, XPS.
sctp.txt
- Notes on the Linux kernel implementation of the SCTP protocol.
secid.txt
- Explanation of the secid member in flow structures.
skfp.txt
- SysKonnect FDDI (SK-5xxx, Compaq Netelligent) driver info.
smc9.txt
- the driver for SMC's 9000 series of Ethernet cards
smctr.txt
- SMC TokenCard TokenRing Linux driver info.
spider-net.txt
- README for the Spidernet Driver (as found in PS3 / Cell BE).
stmmac.txt
- README for the STMicro Synopsys Ethernet driver.
tc-actions-env-rules.txt
- rules for traffic control (tc) actions.
timestamping.txt
- overview of network packet timestamping variants.
tcp.txt
- short blurb on how TCP output takes place.
tcp-thin.txt
- kernel tuning options for low rate 'thin' TCP streams.
tlan.txt
- ThunderLAN (Compaq Netelligent 10/100, Olicom OC-2xxx) driver info.
tms380tr.txt
- SysKonnect Token Ring ISA/PCI adapter driver info.
tproxy.txt
- Transparent proxy support user guide.
tuntap.txt
- TUN/TAP device driver, allowing user space Rx/Tx of packets.
udplite.txt
- UDP-Lite protocol (RFC 3828) introduction.
vortex.txt
- info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
vxge.txt
- README for the Neterion X3100 PCIe Server Adapter.
x25.txt
- general info on X.25 development.
x25-iface.txt
- description of the X.25 Packet Layer to LAPB device interface.
xfrm_proc.txt
- description of the statistics package for XFRM.
xfrm_sync.txt
- sync patches for XFRM enable migration of an SA between hosts.
xfrm_sysctl.txt
- description of the XFRM configuration options.
z8530drv.txt
- info about Linux driver for Z8530 based HDLC cards for AX.25


@ -238,6 +238,18 @@ ad_select
This option was added in bonding version 3.4.0.
all_slaves_active
Specifies that duplicate frames (received on inactive ports) should be
dropped (0) or delivered (1).
Normally, bonding will drop duplicate frames (received on inactive
ports), which is desirable for most users. But sometimes it is nice
to allow duplicate frames to be delivered.
The default value is 0 (drop duplicate frames received on inactive
ports).
arp_interval
Specifies the ARP link monitoring frequency in milliseconds.
@ -433,6 +445,23 @@ miimon
determined. See the High Availability section for additional
information. The default value is 0.
min_links
Specifies the minimum number of links that must be active before
asserting carrier. It is similar to the Cisco EtherChannel min-links
feature. This allows setting the minimum number of member ports that
must be up (link-up state) before marking the bond device as up
(carrier on). This is useful for situations where higher level services
such as clustering want to ensure a minimum number of low bandwidth
links are active before switchover. This option only affects 802.3ad
mode.
The default value is 0. This will cause carrier to be asserted (for
802.3ad mode) whenever there is an active aggregator, regardless of the
number of available links in that aggregator. Note that, because an
aggregator cannot be active without at least one available link,
setting this option to 0 or to 1 has the exact same effect.
mode
Specifies one of the bonding policies. The default is


@ -992,7 +992,7 @@ bindv6only - BOOLEAN
TRUE: disable IPv4-mapped address feature
FALSE: enable IPv4-mapped address feature
Default: FALSE (as specified in RFC2553bis)
Default: FALSE (as specified in RFC3493)
IPv6 Fragmentation:


@ -0,0 +1,378 @@
Scaling in the Linux Networking Stack
Introduction
============
This document describes a set of complementary techniques in the Linux
networking stack to increase parallelism and improve performance for
multi-processor systems.
The following technologies are described:
RSS: Receive Side Scaling
RPS: Receive Packet Steering
RFS: Receive Flow Steering
Accelerated Receive Flow Steering
XPS: Transmit Packet Steering
RSS: Receive Side Scaling
=========================
Contemporary NICs support multiple receive and transmit descriptor queues
(multi-queue). On reception, a NIC can send different packets to different
queues to distribute processing among CPUs. The NIC distributes packets by
applying a filter to each packet that assigns it to one of a small number
of logical flows. Packets for each flow are steered to a separate receive
queue, which in turn can be processed by separate CPUs. This mechanism is
generally known as “Receive-side Scaling” (RSS). The goal of RSS and
the other scaling techniques is to increase performance uniformly.
Multi-queue distribution can also be used for traffic prioritization, but
that is not the focus of these techniques.
The filter used in RSS is typically a hash function over the network
and/or transport layer headers-- for example, a 4-tuple hash over
IP addresses and TCP ports of a packet. The most common hardware
implementation of RSS uses a 128-entry indirection table where each entry
stores a queue number. The receive queue for a packet is determined
by masking out the low order seven bits of the computed hash for the
packet (usually a Toeplitz hash), taking this number as a key into the
indirection table and reading the corresponding value.
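A sketch of that lookup (the names are made up for illustration):

  #include <linux/types.h>

  /* 128-entry RSS indirection table as programmed by the driver. */
  static u8 rss_indir_table[128];

  static unsigned int rss_pick_queue(u32 hash)
  {
          /* the low-order seven bits of the hash index the table */
          return rss_indir_table[hash & 0x7f];
  }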
Some advanced NICs allow steering packets to queues based on
programmable filters. For example, webserver bound TCP port 80 packets
can be directed to their own receive queue. Such “n-tuple” filters can
be configured from ethtool (--config-ntuple).
==== RSS Configuration
The driver for a multi-queue capable NIC typically provides a kernel
module parameter for specifying the number of hardware queues to
configure. In the bnx2x driver, for instance, this parameter is called
num_queues. A typical RSS configuration would be to have one receive queue
for each CPU if the device supports enough queues, or otherwise at least
one for each memory domain, where a memory domain is a set of CPUs that
share a particular memory level (L1, L2, NUMA node, etc.).
The indirection table of an RSS device, which resolves a queue by masked
hash, is usually programmed by the driver at initialization. The
default mapping is to distribute the queues evenly in the table, but the
indirection table can be retrieved and modified at runtime using ethtool
commands (--show-rxfh-indir and --set-rxfh-indir). Modifying the
indirection table could be done to give different queues different
relative weights.
== RSS IRQ Configuration
Each receive queue has a separate IRQ associated with it. The NIC triggers
this to notify a CPU when new packets arrive on the given queue. The
signaling path for PCIe devices uses message signaled interrupts (MSI-X),
that can route each interrupt to a particular CPU. The active mapping
of queues to IRQs can be determined from /proc/interrupts. By default,
an IRQ may be handled on any CPU. Because a non-negligible part of packet
processing takes place in receive interrupt handling, it is advantageous
to spread receive interrupts between CPUs. To manually adjust the IRQ
affinity of each interrupt, see Documentation/IRQ-affinity.txt. Some systems
will be running irqbalance, a daemon that dynamically optimizes IRQ
assignments and as a result may override any manual settings.
== Suggested Configuration
RSS should be enabled when latency is a concern or whenever receive
interrupt processing forms a bottleneck. Spreading load between CPUs
decreases queue length. For low latency networking, the optimal setting
is to allocate as many queues as there are CPUs in the system (or the
NIC maximum, if lower). The most efficient high-rate configuration
is likely the one with the smallest number of receive queues where no
receive queue overflows due to a saturated CPU, because in default
mode with interrupt coalescing enabled, the aggregate number of
interrupts (and thus work) grows with each additional queue.
Per-cpu load can be observed using the mpstat utility, but note that on
processors with hyperthreading (HT), each hyperthread is represented as
a separate CPU. For interrupt handling, HT has shown no benefit in
initial tests, so limit the number of queues to the number of CPU cores
in the system.
RPS: Receive Packet Steering
============================
Receive Packet Steering (RPS) is logically a software implementation of
RSS. Being in software, it is necessarily called later in the datapath.
Whereas RSS selects the queue and hence CPU that will run the hardware
interrupt handler, RPS selects the CPU to perform protocol processing
above the interrupt handler. This is accomplished by placing the packet
on the desired CPU's backlog queue and waking up the CPU for processing.
RPS has some advantages over RSS: 1) it can be used with any NIC,
2) software filters can easily be added to hash over new protocols,
3) it does not increase hardware device interrupt rate (although it does
introduce inter-processor interrupts (IPIs)).
RPS is called during bottom half of the receive interrupt handler, when
a driver sends a packet up the network stack with netif_rx() or
netif_receive_skb(). These call the get_rps_cpu() function, which
selects the queue that should process a packet.
The first step in determining the target CPU for RPS is to calculate a
flow hash over the packet's addresses or ports (2-tuple or 4-tuple hash
depending on the protocol). This serves as a consistent hash of the
associated flow of the packet. The hash is either provided by hardware
or will be computed in the stack. Capable hardware can pass the hash in
the receive descriptor for the packet; this would usually be the same
hash used for RSS (e.g. computed Toeplitz hash). The hash is saved in
skb->rx_hash and can be used elsewhere in the stack as a hash of the
packet's flow.
Each receive hardware queue has an associated list of CPUs to which
RPS may enqueue packets for processing. For each received packet,
an index into the list is computed from the flow hash modulo the size
of the list. The indexed CPU is the target for processing the packet,
and the packet is queued to the tail of that CPU's backlog queue. At
the end of the bottom half routine, IPIs are sent to any CPUs for which
packets have been queued to their backlog queue. The IPI wakes backlog
processing on the remote CPU, and any queued packets are then processed
up the networking stack.
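The selection step can be summarized by this sketch (names are illustrative):

  #include <linux/types.h>

  /*
   * Pick the RPS target CPU for a packet: index the per-queue CPU list
   * by the flow hash, modulo the number of CPUs in the list.
   */
  static unsigned int rps_pick_cpu(const u16 *cpu_list, unsigned int len,
                                   u32 flow_hash)
  {
          return cpu_list[flow_hash % len];
  }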
==== RPS Configuration
RPS requires a kernel compiled with the CONFIG_RPS kconfig symbol (on
by default for SMP). Even when compiled in, RPS remains disabled until
explicitly configured. The list of CPUs to which RPS may forward traffic
can be configured for each receive queue using a sysfs file entry:
/sys/class/net/<dev>/queues/rx-<n>/rps_cpus
This file implements a bitmap of CPUs. RPS is disabled when it is zero
(the default), in which case packets are processed on the interrupting
CPU. Documentation/IRQ-affinity.txt explains how CPUs are assigned to
the bitmap.
== Suggested Configuration
For a single queue device, a typical RPS configuration would be to set
the rps_cpus to the CPUs in the same memory domain of the interrupting
CPU. If NUMA locality is not an issue, this could also be all CPUs in
the system. At high interrupt rate, it might be wise to exclude the
interrupting CPU from the map since that already performs much work.
For a multi-queue system, if RSS is configured so that a hardware
receive queue is mapped to each CPU, then RPS is probably redundant
and unnecessary. If there are fewer hardware queues than CPUs, then
RPS might be beneficial if the rps_cpus for each queue are the ones that
share the same memory domain as the interrupting CPU for that queue.
RFS: Receive Flow Steering
==========================
While RPS steers packets solely based on hash, and thus generally
provides good load distribution, it does not take into account
application locality. This is accomplished by Receive Flow Steering
(RFS). The goal of RFS is to increase datacache hitrate by steering
kernel processing of packets to the CPU where the application thread
consuming the packet is running. RFS relies on the same RPS mechanisms
to enqueue packets onto the backlog of another CPU and to wake up that
CPU.
In RFS, packets are not forwarded directly by the value of their hash,
but the hash is used as index into a flow lookup table. This table maps
flows to the CPUs where those flows are being processed. The flow hash
(see RPS section above) is used to calculate the index into this table.
The CPU recorded in each entry is the one which last processed the flow.
If an entry does not hold a valid CPU, then packets mapped to that entry
are steered using plain RPS. Multiple table entries may point to the
same CPU. Indeed, with many flows and few CPUs, it is very likely that
a single application thread handles flows with many different flow hashes.
rps_sock_flow_table is a global flow table that contains the *desired* CPU for
flows: the CPU that is currently processing the flow in userspace. Each
table value is a CPU index that is updated during calls to recvmsg and
sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
and tcp_splice_read()).
When the scheduler moves a thread to a new CPU while it has outstanding
receive packets on the old CPU, packets may arrive out of order. To
avoid this, RFS uses a second flow table to track outstanding packets
for each flow: rps_dev_flow_table is a table specific to each hardware
receive queue of each device. Each table value stores a CPU index and a
counter. The CPU index represents the *current* CPU onto which packets
for this flow are enqueued for further kernel processing. Ideally, kernel
and userspace processing occur on the same CPU, and hence the CPU index
in both tables is identical. This is likely false if the scheduler has
recently migrated a userspace thread while the kernel still has packets
enqueued for kernel processing on the old CPU.
The counter in rps_dev_flow_table values records the length of the current
CPU's backlog when a packet in this flow was last enqueued. Each backlog
queue has a head counter that is incremented on dequeue. A tail counter
is computed as head counter + queue length. In other words, the counter
in rps_dev_flow_table[i] records the last element in flow i that has
been enqueued onto the currently designated CPU for flow i (of course,
entry i is actually selected by hash and multiple flows may hash to the
same entry i).
And now the trick for avoiding out of order packets: when selecting the
CPU for packet processing (from get_rps_cpu()) the rps_sock_flow table
and the rps_dev_flow table of the queue that the packet was received on
are compared. If the desired CPU for the flow (found in the
rps_sock_flow table) matches the current CPU (found in the rps_dev_flow
table), the packet is enqueued onto that CPU's backlog. If they differ,
the current CPU is updated to match the desired CPU if one of the
following is true:
- The current CPU's queue head counter >= the recorded tail counter
value in rps_dev_flow[i]
- The current CPU is unset (equal to NR_CPUS)
- The current CPU is offline
After this check, the packet is sent to the (possibly updated) current
CPU. These rules aim to ensure that a flow only moves to a new CPU when
there are no packets outstanding on the old CPU, as the outstanding
packets could arrive later than those about to be processed on the new
CPU.
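The decision just described amounts to something like the following sketch
(names and types are illustrative, not the kernel's internal code):

  #include <linux/cpumask.h>
  #include <linux/threads.h>
  #include <linux/types.h>

  /* May kernel processing of this flow move away from 'current_cpu'? */
  static bool rfs_may_switch_cpu(unsigned int current_cpu,
                                 unsigned int backlog_head,
                                 unsigned int flow_tail)
  {
          if (current_cpu >= NR_CPUS)             /* current CPU unset */
                  return true;
          if (!cpu_online(current_cpu))           /* current CPU offline */
                  return true;
          /* no packets of this flow are still queued on the old CPU */
          return (int)(backlog_head - flow_tail) >= 0;
  }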
==== RFS Configuration
RFS is only available if the kconfig symbol CONFIG_RPS is enabled (on
by default for SMP). The functionality remains disabled until explicitly
configured. The number of entries in the global flow table is set through:
/proc/sys/net/core/rps_sock_flow_entries
The number of entries in the per-queue flow table is set through:
/sys/class/net/<dev>/queues/rx-<n>/rps_flow_cnt
== Suggested Configuration
Both of these need to be set before RFS is enabled for a receive queue.
Values for both are rounded up to the nearest power of two. The
suggested flow count depends on the expected number of active connections
at any given time, which may be significantly less than the number of open
connections. We have found that a value of 32768 for rps_sock_flow_entries
works fairly well on a moderately loaded server.
For a single queue device, the rps_flow_cnt value for the single queue
would normally be configured to the same value as rps_sock_flow_entries.
For a multi-queue device, the rps_flow_cnt for each queue might be
configured as rps_sock_flow_entries / N, where N is the number of
queues. So for instance, if rps_sock_flow_entries is set to 32768 and there
are 16 configured receive queues, rps_flow_cnt for each queue might be
configured as 2048.
Accelerated RFS
===============
Accelerated RFS is to RFS what RSS is to RPS: a hardware-accelerated load
balancing mechanism that uses soft state to steer flows based on where
the application thread consuming the packets of each flow is running.
Accelerated RFS should perform better than RFS since packets are sent
directly to a CPU local to the thread consuming the data. The target CPU
will either be the same CPU where the application runs, or at least a CPU
which is local to the application thread's CPU in the cache hierarchy.
To enable accelerated RFS, the networking stack calls the
ndo_rx_flow_steer driver function to communicate the desired hardware
queue for packets matching a particular flow. The network stack
automatically calls this function every time a flow entry in
rps_dev_flow_table is updated. The driver in turn uses a device specific
method to program the NIC to steer the packets.
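For reference, the hook is declared in struct net_device_ops (under
CONFIG_RFS_ACCEL) with roughly the following shape; it is expected to return
the hardware filter ID on success or a negative error code:

int (*ndo_rx_flow_steer)(struct net_device *dev,
                         const struct sk_buff *skb,
                         u16 rxq_index, u32 flow_id);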
The hardware queue for a flow is derived from the CPU recorded in
rps_dev_flow_table. The stack consults a CPU to hardware queue map which
is maintained by the NIC driver. This is an auto-generated reverse map of
the IRQ affinity table shown by /proc/interrupts. Drivers can use
functions in the cpu_rmap (“CPU affinity reverse map”) kernel library
to populate the map. For each CPU, the corresponding queue in the map is
set to be one whose processing CPU is closest in cache locality.
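A driver would typically build this map when it sets up its per-queue
interrupts, along the following lines (a trimmed sketch; num_rx_queues and
rx_irq[] stand in for the driver's own bookkeeping):

#include <linux/cpu_rmap.h>

dev->rx_cpu_rmap = alloc_irq_cpu_rmap(num_rx_queues);
if (!dev->rx_cpu_rmap)
        return -ENOMEM;

for (i = 0; i < num_rx_queues; i++) {
        err = irq_cpu_rmap_add(dev->rx_cpu_rmap, rx_irq[i]);
        if (err) {
                free_irq_cpu_rmap(dev->rx_cpu_rmap);
                dev->rx_cpu_rmap = NULL;
                return err;
        }
}

irq_cpu_rmap_add() registers an affinity notifier for each IRQ, so the
reverse map follows later changes to the IRQ affinities automatically.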
==== Accelerated RFS Configuration
Accelerated RFS is only available if the kernel is compiled with
CONFIG_RFS_ACCEL and support is provided by the NIC device and driver.
It also requires that ntuple filtering is enabled via ethtool. The map
of CPU to queues is automatically deduced from the IRQ affinities
configured for each receive queue by the driver, so no additional
configuration should be necessary.
== Suggested Configuration
This technique should be enabled whenever one wants to use RFS and the
NIC supports hardware acceleration.
XPS: Transmit Packet Steering
=============================
Transmit Packet Steering is a mechanism for intelligently selecting
which transmit queue to use when transmitting a packet on a multi-queue
device. To accomplish this, a mapping from CPU to hardware queue(s) is
recorded. The goal of this mapping is usually to assign queues
exclusively to a subset of CPUs, where the transmit completions for
these queues are processed on a CPU within this set. This choice
provides two benefits. First, contention on the device queue lock is
significantly reduced since fewer CPUs contend for the same queue
(contention can be eliminated completely if each CPU has its own
transmit queue). Secondly, cache miss rate on transmit completion is
reduced, in particular for data cache lines that hold the sk_buff
structures.
XPS is configured per transmit queue by setting a bitmap of CPUs that
may use that queue to transmit. The reverse mapping, from CPUs to
transmit queues, is computed and maintained for each network device.
When transmitting the first packet in a flow, the function
get_xps_queue() is called to select a queue. This function uses the ID
of the running CPU as a key into the CPU-to-queue lookup table. If the
ID matches a single queue, that is used for transmission. If multiple
queues match, one is selected by using the flow hash to compute an index
into the set.
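In outline, the selection behaves like the following sketch (illustrative
names only; the structure does not match the kernel source exactly):

map = xps_map_for_cpu(dev, smp_processor_id()); /* built from xps_cpus bitmaps */

if (map->len == 1)
        queue_index = map->queues[0];
else
        queue_index = map->queues[flow_hash(skb) % map->len];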
The queue chosen for transmitting a particular flow is saved in the
corresponding socket structure for the flow (e.g. a TCP connection).
This transmit queue is used for subsequent packets sent on the flow to
prevent out of order (ooo) packets. The choice also amortizes the cost
of calling get_xps_queue() over all packets in the flow. To avoid
ooo packets, the queue for a flow can subsequently only be changed if
skb->ooo_okay is set for a packet in the flow. This flag indicates that
there are no outstanding packets in the flow, so the transmit queue can
change without the risk of generating out of order packets. The
transport layer is responsible for setting ooo_okay appropriately. TCP,
for instance, sets the flag when all data for a connection has been
acknowledged.
==== XPS Configuration
XPS is only available if the kconfig symbol CONFIG_XPS is enabled (on by
default for SMP). The functionality remains disabled until explicitly
configured. To enable XPS, the bitmap of CPUs that may use a transmit
queue is configured using the sysfs file entry:
/sys/class/net/<dev>/queues/tx-<n>/xps_cpus
== Suggested Configuration
For a network device with a single transmission queue, XPS configuration
has no effect, since there is no choice in this case. In a multi-queue
system, XPS is preferably configured so that each CPU maps onto one queue.
If there are as many queues as there are CPUs in the system, then each
queue can also map onto one CPU, resulting in exclusive pairings that
experience no contention. If there are fewer queues than CPUs, then the
best CPUs to share a given queue are probably those that share the cache
with the CPU that processes transmit completions for that queue
(transmit interrupts).
Further Information
===================
RPS and RFS were introduced in kernel 2.6.35. XPS was incorporated into
2.6.38. Original patches were submitted by Tom Herbert
(therbert@google.com)
Accelerated RFS was introduced in 2.6.35. Original patches were
submitted by Ben Hutchings (bhutchings@solarflare.com)
Authors:
Tom Herbert (therbert@google.com)
Willem de Bruijn (willemb@google.com)

View File

@ -431,8 +431,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
void pm_runtime_irq_safe(struct device *dev);
- set the power.irq_safe flag for the device, causing the runtime-PM
suspend and resume callbacks (but not the idle callback) to be invoked
with interrupts disabled
callbacks to be invoked with interrupts off
void pm_runtime_mark_last_busy(struct device *dev);
- set the power.last_busy field to the current time

76
Documentation/ramoops.txt Normal file
View File

@ -0,0 +1,76 @@
Ramoops oops/panic logger
=========================
Sergiu Iordache <sergiu@chromium.org>
Updated: 8 August 2011
0. Introduction
Ramoops is an oops/panic logger that writes its logs to RAM before the system
crashes. It works by logging oopses and panics in a circular buffer. Ramoops
needs a system with persistent RAM so that the content of that area can
survive after a restart.
1. Ramoops concepts
Ramoops uses a predefined memory area to store the dump. The start and size of
the memory area are set using two variables:
* "mem_address" for the start
* "mem_size" for the size. The memory size will be rounded down to a
power of two.
The memory area is divided into "record_size" chunks (also rounded down to a
power of two) and each oops/panic writes a "record_size" chunk of
information.
Dumping both oopses and panics is enabled by setting the "dump_oops" variable
to 1; setting it to 0 dumps only the panics.
The module uses a counter to record multiple dumps but the counter gets reset
on restart (i.e. new dumps after the restart will overwrite old ones).
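In other words, with the area split into mem_size / record_size slots, dump
number n since the last restart conceptually lands at:

record_start = mem_address + (n % (mem_size / record_size)) * record_size;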
2. Setting the parameters
Setting the ramoops parameters can be done in 2 different manners:
1. Use the module parameters (which have the names of the variables described
above).
2. Use a platform device and set the platform data. The parameters can then
be set through that platform data. An example of doing that is:
#include <linux/ramoops.h>
[...]
static struct ramoops_platform_data ramoops_data = {
        .mem_size       = <...>,
        .mem_address    = <...>,
        .record_size    = <...>,
        .dump_oops      = <...>,
};

static struct platform_device ramoops_dev = {
        .name = "ramoops",
        .dev = {
                .platform_data = &ramoops_data,
        },
};

[... inside a function ...]
int ret;

ret = platform_device_register(&ramoops_dev);
if (ret) {
        printk(KERN_ERR "unable to register platform device\n");
        return ret;
}
3. Dump format
The data dump begins with a header, currently defined as "====" followed by a
timestamp and a new line. The dump then continues with the actual data.
4. Reading the data
The dump data can be read from memory (through /dev/mem or other means).
Getting the module parameters, which are needed in order to parse the data, can
be done through /sys/module/ramoops/parameters/* .
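As an illustration only (not part of the ramoops module), a minimal userspace
reader for one record could look like the sketch below; the address and size
are passed on the command line and must match the module parameters, and note
that access to /dev/mem may be restricted by the kernel configuration:

/* ramoops_read.c - print one ramoops record from /dev/mem (sketch) */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
        unsigned long addr, size;
        char *buf;
        int fd;

        if (argc != 3) {
                fprintf(stderr, "usage: %s <mem_address> <record_size>\n",
                        argv[0]);
                return 1;
        }
        addr = strtoul(argv[1], NULL, 0);
        size = strtoul(argv[2], NULL, 0);

        buf = malloc(size);
        fd = open("/dev/mem", O_RDONLY);
        if (!buf || fd < 0) {
                perror("open /dev/mem");
                return 1;
        }
        if (pread(fd, buf, size, addr) != (ssize_t)size) {
                perror("pread");
                return 1;
        }
        /* each record starts with "====" followed by a timestamp */
        fwrite(buf, 1, size, stdout);
        close(fd);
        free(buf);
        return 0;
}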

View File

@ -8,3 +8,6 @@ lguest/
- Extremely simple hypervisor for experimental/educational use.
uml/
- User Mode Linux, builds/runs Linux kernel as a userspace program.
virtio.txt
- Text version of draft virtio spec.
See http://ozlabs.org/~rusty/virtio-spec

View File

@ -1996,6 +1996,9 @@ int main(int argc, char *argv[])
/* We use a simple helper to copy the arguments separated by spaces. */
concat((char *)(boot + 1), argv+optind+2);
/* Set kernel alignment to 16M (CONFIG_PHYSICAL_ALIGN) */
boot->hdr.kernel_alignment = 0x1000000;
/* Boot protocol version: 2.07 supports the fields for lguest. */
boot->hdr.version = 0x207;

File diff suppressed because it is too large Load Diff

View File

@ -1883,7 +1883,7 @@ S: Maintained
F: drivers/connector/
CONTROL GROUPS (CGROUPS)
M: Paul Menage <menage@google.com>
M: Paul Menage <paul@paulmenage.org>
M: Li Zefan <lizf@cn.fujitsu.com>
L: containers@lists.linux-foundation.org
S: Maintained
@ -1932,7 +1932,7 @@ S: Maintained
F: tools/power/cpupower
CPUSETS
M: Paul Menage <menage@google.com>
M: Paul Menage <paul@paulmenage.org>
W: http://www.bullopensource.org/cpuset/
W: http://oss.sgi.com/projects/cpusets/
S: Supported
@ -2649,11 +2649,11 @@ F: drivers/net/wan/dlci.c
F: drivers/net/wan/sdla.c
FRAMEBUFFER LAYER
M: Paul Mundt <lethal@linux-sh.org>
M: Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
L: linux-fbdev@vger.kernel.org
W: http://linux-fbdev.sourceforge.net/
Q: http://patchwork.kernel.org/project/linux-fbdev/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/fbdev-2.6.git
T: git git://github.com/schandinat/linux-2.6.git fbdev-next
S: Maintained
F: Documentation/fb/
F: Documentation/devicetree/bindings/fb/
@ -4450,8 +4450,8 @@ M: "David S. Miller" <davem@davemloft.net>
L: netdev@vger.kernel.org
W: http://www.linuxfoundation.org/en/Net
W: http://patchwork.ozlabs.org/project/netdev/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
S: Maintained
F: net/
F: include/net/
@ -4604,7 +4604,7 @@ F: arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
F: arch/arm/mach-omap2/clockdomain44xx.c
OMAP AUDIO SUPPORT
M: Jarkko Nikula <jhnikula@gmail.com>
M: Jarkko Nikula <jarkko.nikula@bitmer.com>
L: alsa-devel@alsa-project.org (subscribers-only)
L: linux-omap@vger.kernel.org
S: Maintained
@ -4971,7 +4971,7 @@ M: Paul Mackerras <paulus@samba.org>
M: Ingo Molnar <mingo@elte.hu>
M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
S: Supported
F: kernel/perf_event*.c
F: kernel/events/*
F: include/linux/perf_event.h
F: arch/*/kernel/perf_event*.c
F: arch/*/kernel/*/perf_event*.c
@ -5532,6 +5532,7 @@ F: include/media/*7146*
SAMSUNG AUDIO (ASoC) DRIVERS
M: Jassi Brar <jassisinghbrar@gmail.com>
M: Sangbeom Kim <sbkim73@samsung.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
F: sound/soc/samsung
@ -7087,7 +7088,7 @@ S: Supported
F: drivers/mmc/host/vub300.c
W1 DALLAS'S 1-WIRE BUS
M: Evgeniy Polyakov <johnpol@2ka.mipt.ru>
M: Evgeniy Polyakov <zbr@ioremap.net>
S: Maintained
F: Documentation/w1/
F: drivers/w1/

View File

@ -1,8 +1,8 @@
VERSION = 3
PATCHLEVEL = 1
SUBLEVEL = 0
EXTRAVERSION = -rc1
NAME = Sneaky Weasel
EXTRAVERSION = -rc6
NAME = "Divemaster Edition"
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"

View File

@ -27,13 +27,4 @@
#define UAC_NOFIX 2
#define UAC_SIGBUS 4
#ifdef __KERNEL__
/* This is the shift that is applied to the UAC bits as stored in the
per-thread flags. See thread_info.h. */
#define UAC_SHIFT 6
#endif
#endif /* __ASM_ALPHA_SYSINFO_H */

View File

@ -74,9 +74,9 @@ register struct thread_info *__current_thread_info __asm__("$8");
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_POLLING_NRFLAG 8 /* poll_idle is polling NEED_RESCHED */
#define TIF_DIE_IF_KERNEL 9 /* dik recursion lock */
#define TIF_UAC_NOPRINT 10 /* see sysinfo.h */
#define TIF_UAC_NOFIX 11
#define TIF_UAC_SIGBUS 12
#define TIF_UAC_NOPRINT 10 /* ! Preserve sequence of following */
#define TIF_UAC_NOFIX 11 /* ! flags as they match */
#define TIF_UAC_SIGBUS 12 /* ! userspace part of 'osf_sysinfo' */
#define TIF_MEMDIE 13 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 14 /* restore signal mask in do_signal */
#define TIF_FREEZE 16 /* is freezing for suspend */
@ -97,7 +97,7 @@ register struct thread_info *__current_thread_info __asm__("$8");
#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \
| _TIF_SYSCALL_TRACE)
#define ALPHA_UAC_SHIFT 10
#define ALPHA_UAC_SHIFT TIF_UAC_NOPRINT
#define ALPHA_UAC_MASK (1 << TIF_UAC_NOPRINT | 1 << TIF_UAC_NOFIX | \
1 << TIF_UAC_SIGBUS)

View File

@ -42,6 +42,7 @@
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/sysinfo.h>
#include <asm/thread_info.h>
#include <asm/hwrpb.h>
#include <asm/processor.h>
@ -633,9 +634,10 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
case GSI_UACPROC:
if (nbytes < sizeof(unsigned int))
return -EINVAL;
w = (current_thread_info()->flags >> UAC_SHIFT) & UAC_BITMASK;
if (put_user(w, (unsigned int __user *)buffer))
return -EFAULT;
w = (current_thread_info()->flags >> ALPHA_UAC_SHIFT) &
UAC_BITMASK;
if (put_user(w, (unsigned int __user *)buffer))
return -EFAULT;
return 1;
case GSI_PROC_TYPE:
@ -756,8 +758,8 @@ SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer,
case SSIN_UACPROC:
again:
old = current_thread_info()->flags;
new = old & ~(UAC_BITMASK << UAC_SHIFT);
new = new | (w & UAC_BITMASK) << UAC_SHIFT;
new = old & ~(UAC_BITMASK << ALPHA_UAC_SHIFT);
new = new | (w & UAC_BITMASK) << ALPHA_UAC_SHIFT;
if (cmpxchg(&current_thread_info()->flags,
old, new) != old)
goto again;

View File

@ -360,7 +360,7 @@ sys_call_table:
.quad sys_newuname
.quad sys_nanosleep /* 340 */
.quad sys_mremap
.quad sys_nfsservctl
.quad sys_ni_syscall /* old nfsservctl */
.quad sys_setresuid
.quad sys_getresuid
.quad sys_pciconfig_read /* 345 */

View File

@ -1271,6 +1271,18 @@ config ARM_ERRATA_754327
This workaround defines cpu_relax() as smp_mb(), preventing correctly
written polling loops from denying visibility of updates to memory.
config ARM_ERRATA_364296
bool "ARM errata: Possible cache data corruption with hit-under-miss enabled"
depends on CPU_V6 && !SMP
help
This options enables the workaround for the 364296 ARM1136
r0p2 erratum (possible cache data corruption with
hit-under-miss enabled). It sets the undocumented bit 31 in
the auxiliary control register and the FI bit in the control
register, thus disabling hit-under-miss without putting the
processor into full low interrupt latency mode. ARM11MPCore
is not affected.
endmenu
source "arch/arm/common/Kconfig"

View File

@ -82,7 +82,7 @@ asmlinkage void mmc_loader(unsigned char *buf, unsigned long len)
/* Disable clock to MMC hardware block */
__raw_writel(__raw_readl(SMSTPCR3) & (1 << 12), SMSTPCR3);
__raw_writel(__raw_readl(SMSTPCR3) | (1 << 12), SMSTPCR3);
mmc_update_progress(MMC_PROGRESS_DONE);
}

View File

@ -85,7 +85,7 @@ asmlinkage void mmc_loader(unsigned short *buf, unsigned long len)
goto err;
/* Disable clock to SDHI1 hardware block */
__raw_writel(__raw_readl(SMSTPCR3) & (1 << 13), SMSTPCR3);
__raw_writel(__raw_readl(SMSTPCR3) | (1 << 13), SMSTPCR3);
mmc_update_progress(MMC_PROGRESS_DONE);

View File

@ -45,8 +45,13 @@
#define L2X0_CLEAN_INV_LINE_PA 0x7F0
#define L2X0_CLEAN_INV_LINE_IDX 0x7F8
#define L2X0_CLEAN_INV_WAY 0x7FC
#define L2X0_LOCKDOWN_WAY_D 0x900
#define L2X0_LOCKDOWN_WAY_I 0x904
/*
* The lockdown registers repeat 8 times for L310, the L210 has only one
* D and one I lockdown register at 0x0900 and 0x0904.
*/
#define L2X0_LOCKDOWN_WAY_D_BASE 0x900
#define L2X0_LOCKDOWN_WAY_I_BASE 0x904
#define L2X0_LOCKDOWN_STRIDE 0x08
#define L2X0_TEST_OPERATION 0xF00
#define L2X0_LINE_DATA 0xF10
#define L2X0_LINE_TAG 0xF30
@ -64,7 +69,7 @@
#define L2X0_AUX_CTRL_MASK 0xc0000fff
#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16
#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17
#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x3 << 17)
#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17)
#define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT 22
#define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT 26
#define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT 27

View File

@ -41,7 +41,7 @@ struct arm_pmu_platdata {
* encoded error on failure.
*/
extern struct platform_device *
reserve_pmu(enum arm_pmu_type device);
reserve_pmu(enum arm_pmu_type type);
/**
* release_pmu() - Relinquish control of the performance counters
@ -62,26 +62,26 @@ release_pmu(enum arm_pmu_type type);
* the actual hardware initialisation.
*/
extern int
init_pmu(enum arm_pmu_type device);
init_pmu(enum arm_pmu_type type);
#else /* CONFIG_CPU_HAS_PMU */
#include <linux/err.h>
static inline struct platform_device *
reserve_pmu(enum arm_pmu_type device)
reserve_pmu(enum arm_pmu_type type)
{
return ERR_PTR(-ENODEV);
}
static inline int
release_pmu(struct platform_device *pdev)
release_pmu(enum arm_pmu_type type)
{
return -ENODEV;
}
static inline int
init_pmu(enum arm_pmu_type device)
init_pmu(enum arm_pmu_type type)
{
return -ENODEV;
}

View File

@ -178,7 +178,7 @@
CALL(sys_ni_syscall) /* vm86 */
CALL(sys_ni_syscall) /* was sys_query_module */
CALL(sys_poll)
CALL(sys_nfsservctl)
CALL(sys_ni_syscall) /* was nfsservctl */
/* 170 */ CALL(sys_setresgid16)
CALL(sys_getresgid16)
CALL(sys_prctl)

View File

@ -195,10 +195,10 @@ ENTRY(iwmmxt_task_disable)
@ enable access to CP0 and CP1
XSC(mrc p15, 0, r4, c15, c1, 0)
XSC(orr r4, r4, #0xf)
XSC(orr r4, r4, #0x3)
XSC(mcr p15, 0, r4, c15, c1, 0)
PJ4(mrc p15, 0, r4, c1, c0, 2)
PJ4(orr r4, r4, #0x3)
PJ4(orr r4, r4, #0xf)
PJ4(mcr p15, 0, r4, c1, c0, 2)
mov r0, #0 @ nothing to load
@ -313,7 +313,7 @@ ENTRY(iwmmxt_task_switch)
teq r2, r3 @ next task owns it?
movne pc, lr @ no: leave Concan disabled
1: @ flip Conan access
1: @ flip Concan access
XSC(eor r1, r1, #0x3)
XSC(mcr p15, 0, r1, c15, c1, 0)
PJ4(eor r1, r1, #0xf)

View File

@ -31,7 +31,7 @@ static int __devinit pmu_register(struct platform_device *pdev,
{
if (type < 0 || type >= ARM_NUM_PMU_DEVICES) {
pr_warning("received registration request for unknown "
"device %d\n", type);
"PMU device type %d\n", type);
return -EINVAL;
}
@ -112,17 +112,17 @@ static int __init register_pmu_driver(void)
device_initcall(register_pmu_driver);
struct platform_device *
reserve_pmu(enum arm_pmu_type device)
reserve_pmu(enum arm_pmu_type type)
{
struct platform_device *pdev;
if (test_and_set_bit_lock(device, &pmu_lock)) {
if (test_and_set_bit_lock(type, &pmu_lock)) {
pdev = ERR_PTR(-EBUSY);
} else if (pmu_devices[device] == NULL) {
clear_bit_unlock(device, &pmu_lock);
} else if (pmu_devices[type] == NULL) {
clear_bit_unlock(type, &pmu_lock);
pdev = ERR_PTR(-ENODEV);
} else {
pdev = pmu_devices[device];
pdev = pmu_devices[type];
}
return pdev;
@ -130,11 +130,11 @@ reserve_pmu(enum arm_pmu_type device)
EXPORT_SYMBOL_GPL(reserve_pmu);
int
release_pmu(enum arm_pmu_type device)
release_pmu(enum arm_pmu_type type)
{
if (WARN_ON(!pmu_devices[device]))
if (WARN_ON(!pmu_devices[type]))
return -EINVAL;
clear_bit_unlock(device, &pmu_lock);
clear_bit_unlock(type, &pmu_lock);
return 0;
}
EXPORT_SYMBOL_GPL(release_pmu);
@ -182,17 +182,17 @@ init_cpu_pmu(void)
}
int
init_pmu(enum arm_pmu_type device)
init_pmu(enum arm_pmu_type type)
{
int err = 0;
switch (device) {
switch (type) {
case ARM_PMU_DEVICE_CPU:
err = init_cpu_pmu();
break;
default:
pr_warning("attempt to initialise unknown device %d\n",
device);
pr_warning("attempt to initialise PMU of unknown "
"type %d\n", type);
err = -EINVAL;
}

View File

@ -57,7 +57,8 @@ relocate_new_kernel:
mov r0,#0
ldr r1,kexec_mach_type
ldr r2,kexec_boot_atags
mov pc,lr
ARM( mov pc, lr )
THUMB( bx lr )
.align

View File

@ -280,18 +280,19 @@ static void __init cacheid_init(void)
if (arch >= CPU_ARCH_ARMv6) {
if ((cachetype & (7 << 29)) == 4 << 29) {
/* ARMv7 register format */
arch = CPU_ARCH_ARMv7;
cacheid = CACHEID_VIPT_NONALIASING;
if ((cachetype & (3 << 14)) == 1 << 14)
cacheid |= CACHEID_ASID_TAGGED;
else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
cacheid |= CACHEID_VIPT_I_ALIASING;
} else if (cachetype & (1 << 23)) {
cacheid = CACHEID_VIPT_ALIASING;
} else {
cacheid = CACHEID_VIPT_NONALIASING;
if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
cacheid |= CACHEID_VIPT_I_ALIASING;
arch = CPU_ARCH_ARMv6;
if (cachetype & (1 << 23))
cacheid = CACHEID_VIPT_ALIASING;
else
cacheid = CACHEID_VIPT_NONALIASING;
}
if (cpu_has_aliasing_icache(arch))
cacheid |= CACHEID_VIPT_I_ALIASING;
} else {
cacheid = CACHEID_VIVT;
}

View File

@ -137,8 +137,8 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
clockevents_register_device(clk);
/* Make sure our local interrupt controller has this enabled */
gic_enable_ppi(clk->irq);
clockevents_register_device(clk);
}

View File

@ -157,7 +157,7 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc1_clk),
CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk),

View File

@ -8,7 +8,6 @@
* published by the Free Software Foundation.
*/
#include <mach/hardware.h>
#include <asm/hardware/entry-macro-gic.S>
.macro disable_fiq

View File

@ -13,7 +13,6 @@
#include <linux/io.h>
#include <asm/proc-fns.h>
#include <mach/hardware.h>
static inline void arch_idle(void)
{

View File

@ -8,7 +8,6 @@
*/
#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <mach/cns3xxx.h>
#define AMBA_UART_DR(base) (*(volatile unsigned char *)((base) + 0x00))

View File

@ -49,7 +49,7 @@ static struct cns3xxx_pcie *sysdata_to_cnspci(void *sysdata)
return &cns3xxx_pcie[root->domain];
}
static struct cns3xxx_pcie *pdev_to_cnspci(struct pci_dev *dev)
static struct cns3xxx_pcie *pdev_to_cnspci(const struct pci_dev *dev)
{
return sysdata_to_cnspci(dev->sysdata);
}

View File

@ -115,6 +115,32 @@ static struct spi_board_info da850evm_spi_info[] = {
},
};
#ifdef CONFIG_MTD
static void da850_evm_m25p80_notify_add(struct mtd_info *mtd)
{
char *mac_addr = davinci_soc_info.emac_pdata->mac_addr;
size_t retlen;
if (!strcmp(mtd->name, "MAC-Address")) {
mtd->read(mtd, 0, ETH_ALEN, &retlen, mac_addr);
if (retlen == ETH_ALEN)
pr_info("Read MAC addr from SPI Flash: %pM\n",
mac_addr);
}
}
static struct mtd_notifier da850evm_spi_notifier = {
.add = da850_evm_m25p80_notify_add,
};
static void da850_evm_setup_mac_addr(void)
{
register_mtd_user(&da850evm_spi_notifier);
}
#else
static void da850_evm_setup_mac_addr(void) { }
#endif
static struct mtd_partition da850_evm_norflash_partition[] = {
{
.name = "bootloaders + env",
@ -1244,6 +1270,8 @@ static __init void da850_evm_init(void)
if (ret)
pr_warning("da850_evm_init: sata registration failed: %d\n",
ret);
da850_evm_setup_mac_addr();
}
#ifdef CONFIG_SERIAL_8250_CONSOLE

View File

@ -243,7 +243,7 @@
#define PSC_STATE_DISABLE 2
#define PSC_STATE_ENABLE 3
#define MDSTAT_STATE_MASK 0x1f
#define MDSTAT_STATE_MASK 0x3f
#define MDCTL_FORCE BIT(31)
#ifndef __ASSEMBLER__

View File

@ -217,7 +217,11 @@ ddr2clk_stop_done:
ENDPROC(davinci_ddr_psc_config)
CACHE_FLUSH:
.word arm926_flush_kern_cache_all
#ifdef CONFIG_CPU_V6
.word v6_flush_kern_cache_all
#else
.word arm926_flush_kern_cache_all
#endif
ENTRY(davinci_cpu_suspend_sz)
.word . - davinci_cpu_suspend

View File

@ -6,7 +6,7 @@
* TS72xx memory map:
*
* virt phys size
* febff000 22000000 4K model number register
* febff000 22000000 4K model number register (bits 0-2)
* febfe000 22400000 4K options register
* febfd000 22800000 4K options register #2
* febf9000 10800000 4K TS-5620 RTC index register
@ -20,6 +20,9 @@
#define TS72XX_MODEL_TS7200 0x00
#define TS72XX_MODEL_TS7250 0x01
#define TS72XX_MODEL_TS7260 0x02
#define TS72XX_MODEL_TS7300 0x03
#define TS72XX_MODEL_TS7400 0x04
#define TS72XX_MODEL_MASK 0x07
#define TS72XX_OPTIONS_PHYS_BASE 0x22400000
@ -51,19 +54,34 @@
#ifndef __ASSEMBLY__
static inline int ts72xx_model(void)
{
return __raw_readb(TS72XX_MODEL_VIRT_BASE) & TS72XX_MODEL_MASK;
}
static inline int board_is_ts7200(void)
{
return __raw_readb(TS72XX_MODEL_VIRT_BASE) == TS72XX_MODEL_TS7200;
return ts72xx_model() == TS72XX_MODEL_TS7200;
}
static inline int board_is_ts7250(void)
{
return __raw_readb(TS72XX_MODEL_VIRT_BASE) == TS72XX_MODEL_TS7250;
return ts72xx_model() == TS72XX_MODEL_TS7250;
}
static inline int board_is_ts7260(void)
{
return __raw_readb(TS72XX_MODEL_VIRT_BASE) == TS72XX_MODEL_TS7260;
return ts72xx_model() == TS72XX_MODEL_TS7260;
}
static inline int board_is_ts7300(void)
{
return ts72xx_model() == TS72XX_MODEL_TS7300;
}
static inline int board_is_ts7400(void)
{
return ts72xx_model() == TS72XX_MODEL_TS7400;
}
static inline int is_max197_installed(void)

View File

@ -520,7 +520,7 @@ static struct clk init_clocks_off[] = {
.ctrlbit = (1 << 21),
}, {
.name = "ac97",
.id = -1,
.devname = "samsung-ac97",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 27),
}, {

View File

@ -24,12 +24,13 @@
#include <plat/exynos4.h>
#include <plat/adc-core.h>
#include <plat/sdhci.h>
#include <plat/devs.h>
#include <plat/fb-core.h>
#include <plat/fimc-core.h>
#include <plat/iic-core.h>
#include <plat/reset.h>
#include <mach/regs-irq.h>
#include <mach/regs-pmu.h>
extern int combiner_init(unsigned int combiner_nr, void __iomem *base,
unsigned int irq_start);
@ -128,6 +129,11 @@ static void exynos4_idle(void)
local_irq_enable();
}
static void exynos4_sw_reset(void)
{
__raw_writel(0x1, S5P_SWRESET);
}
/*
* exynos4_map_io
*
@ -241,5 +247,8 @@ int __init exynos4_init(void)
/* set idle function */
pm_idle = exynos4_idle;
/* set sw_reset function */
s5p_reset_hook = exynos4_sw_reset;
return sysdev_register(&exynos4_sysdev);
}

View File

@ -80,9 +80,8 @@
#define IRQ_HSMMC3 IRQ_SPI(76)
#define IRQ_DWMCI IRQ_SPI(77)
#define IRQ_MIPICSI0 IRQ_SPI(78)
#define IRQ_MIPICSI1 IRQ_SPI(80)
#define IRQ_MIPI_CSIS0 IRQ_SPI(78)
#define IRQ_MIPI_CSIS1 IRQ_SPI(80)
#define IRQ_ONENAND_AUDI IRQ_SPI(82)
#define IRQ_ROTATOR IRQ_SPI(83)

View File

@ -29,6 +29,8 @@
#define S5P_USE_STANDBY_WFE1 (1 << 25)
#define S5P_USE_MASK ((0x3 << 16) | (0x3 << 24))
#define S5P_SWRESET S5P_PMUREG(0x0400)
#define S5P_WAKEUP_STAT S5P_PMUREG(0x0600)
#define S5P_EINT_WAKEUP_MASK S5P_PMUREG(0x0604)
#define S5P_WAKEUP_MASK S5P_PMUREG(0x0608)

View File

@ -23,6 +23,8 @@
#include <mach/regs-gpio.h>
#include <asm/mach/irq.h>
static DEFINE_SPINLOCK(eint_lock);
static unsigned int eint0_15_data[16];
@ -184,8 +186,11 @@ static inline void exynos4_irq_demux_eint(unsigned int start)
static void exynos4_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
struct irq_chip *chip = irq_get_chip(irq);
chained_irq_enter(chip, desc);
exynos4_irq_demux_eint(IRQ_EINT(16));
exynos4_irq_demux_eint(IRQ_EINT(24));
chained_irq_exit(chip, desc);
}
static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
@ -193,6 +198,7 @@ static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
u32 *irq_data = irq_get_handler_data(irq);
struct irq_chip *chip = irq_get_chip(irq);
chained_irq_enter(chip, desc);
chip->irq_mask(&desc->irq_data);
if (chip->irq_ack)
@ -201,6 +207,7 @@ static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
generic_handle_irq(*irq_data);
chip->irq_unmask(&desc->irq_data);
chained_irq_exit(chip, desc);
}
int __init exynos4_init_irq_eint(void)

View File

@ -79,7 +79,7 @@ static struct s3c2410_uartcfg universal_uartcfgs[] __initdata = {
};
static struct regulator_consumer_supply max8952_consumer =
REGULATOR_SUPPLY("vddarm", NULL);
REGULATOR_SUPPLY("vdd_arm", NULL);
static struct max8952_platform_data universal_max8952_pdata __initdata = {
.gpio_vid0 = EXYNOS4_GPX0(3),
@ -105,7 +105,7 @@ static struct max8952_platform_data universal_max8952_pdata __initdata = {
};
static struct regulator_consumer_supply lp3974_buck1_consumer =
REGULATOR_SUPPLY("vddint", NULL);
REGULATOR_SUPPLY("vdd_int", NULL);
static struct regulator_consumer_supply lp3974_buck2_consumer =
REGULATOR_SUPPLY("vddg3d", NULL);

View File

@ -82,7 +82,7 @@ static int exynos4_usb_phy1_init(struct platform_device *pdev)
rstcon &= ~(HOST_LINK_PORT_SWRST_MASK | PHY1_SWRST_MASK);
writel(rstcon, EXYNOS4_RSTCON);
udelay(50);
udelay(80);
clk_disable(otg_clk);
clk_put(otg_clk);

View File

@ -62,6 +62,7 @@ config ARCH_EBSA285_HOST
config ARCH_NETWINDER
bool "NetWinder"
select CLKSRC_I8253
select CLKEVT_I8253
select FOOTBRIDGE_HOST
select ISA
select ISA_DMA

View File

@ -18,6 +18,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <video/vga.h>
#include <asm/irq.h>
#include <asm/system.h>

View File

@ -331,6 +331,9 @@ int __init mx25_clocks_init(void)
__raw_writel(__raw_readl(CRM_BASE+0x64) | (1 << 7) | (1 << 0),
CRM_BASE + 0x64);
/* Clock source for gpt is ahb_div */
__raw_writel(__raw_readl(CRM_BASE+0x64) & ~(1 << 5), CRM_BASE + 0x64);
mxc_timer_init(&gpt_clk, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
return 0;

View File

@ -310,7 +310,7 @@ static struct sys_timer eukrea_cpuimx27_timer = {
.init = eukrea_cpuimx27_timer_init,
};
MACHINE_START(CPUIMX27, "EUKREA CPUIMX27")
MACHINE_START(EUKREA_CPUIMX27, "EUKREA CPUIMX27")
.boot_params = MX27_PHYS_OFFSET + 0x100,
.map_io = mx27_map_io,
.init_early = imx27_init_early,

View File

@ -192,7 +192,7 @@ struct sys_timer eukrea_cpuimx35_timer = {
.init = eukrea_cpuimx35_timer_init,
};
MACHINE_START(EUKREA_CPUIMX35, "Eukrea CPUIMX35")
MACHINE_START(EUKREA_CPUIMX35SD, "Eukrea CPUIMX35")
/* Maintainer: Eukrea Electromatique */
.boot_params = MX3x_PHYS_OFFSET + 0x100,
.map_io = mx35_map_io,

View File

@ -161,7 +161,7 @@ static struct sys_timer eukrea_cpuimx25_timer = {
.init = eukrea_cpuimx25_timer_init,
};
MACHINE_START(EUKREA_CPUIMX25, "Eukrea CPUIMX25")
MACHINE_START(EUKREA_CPUIMX25SD, "Eukrea CPUIMX25")
/* Maintainer: Eukrea Electromatique */
.boot_params = MX25_PHYS_OFFSET + 0x100,
.map_io = mx25_map_io,

View File

@ -30,6 +30,7 @@
#include <linux/input.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <sound/tlv320aic32x4.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
@ -196,6 +197,17 @@ static struct pca953x_platform_data visstrim_m10_pca9555_pdata = {
.invert = 0,
};
static struct aic32x4_pdata visstrim_m10_aic32x4_pdata = {
.power_cfg = AIC32X4_PWR_MICBIAS_2075_LDOIN |
AIC32X4_PWR_AVDD_DVDD_WEAK_DISABLE |
AIC32X4_PWR_AIC32X4_LDO_ENABLE |
AIC32X4_PWR_CMMODE_LDOIN_RANGE_18_36 |
AIC32X4_PWR_CMMODE_HP_LDOIN_POWERED,
.micpga_routing = AIC32X4_MICPGA_ROUTE_LMIC_IN2R_10K |
AIC32X4_MICPGA_ROUTE_RMIC_IN1L_10K,
.swapdacs = false,
};
static struct i2c_board_info visstrim_m10_i2c_devices[] = {
{
I2C_BOARD_INFO("pca9555", 0x20),
@ -203,6 +215,7 @@ static struct i2c_board_info visstrim_m10_i2c_devices[] = {
},
{
I2C_BOARD_INFO("tlv320aic32x4", 0x18),
.platform_data = &visstrim_m10_aic32x4_pdata,
}
};

View File

@ -468,7 +468,7 @@ static struct i2c_board_info __initdata mx31ads_i2c1_devices[] = {
#endif
};
static void mxc_init_i2c(void)
static void __init mxc_init_i2c(void)
{
i2c_register_board_info(1, mx31ads_i2c1_devices,
ARRAY_SIZE(mx31ads_i2c1_devices));
@ -486,7 +486,7 @@ static unsigned int ssi_pins[] = {
MX31_PIN_STXD5__STXD5,
};
static void mxc_init_audio(void)
static void __init mxc_init_audio(void)
{
imx31_add_imx_ssi(0, NULL);
mxc_iomux_setup_multiple_pins(ssi_pins, ARRAY_SIZE(ssi_pins), "ssi");

View File

@ -192,7 +192,7 @@ static struct mxc_usbh_platform_data usbh2_pdata __initdata = {
.portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
};
static void lilly1131_usb_init(void)
static void __init lilly1131_usb_init(void)
{
imx31_add_mxc_ehci_hs(1, &usbh1_pdata);

View File

@ -337,15 +337,15 @@ static unsigned long timer_reload;
static void integrator_clocksource_init(u32 khz)
{
void __iomem *base = (void __iomem *)TIMER2_VA_BASE;
u32 ctrl = TIMER_CTRL_ENABLE;
u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
if (khz >= 1500) {
khz /= 16;
ctrl = TIMER_CTRL_DIV16;
ctrl |= TIMER_CTRL_DIV16;
}
writel(ctrl, base + TIMER_CTRL);
writel(0xffff, base + TIMER_LOAD);
writel(ctrl, base + TIMER_CTRL);
clocksource_mmio_init(base + TIMER_VALUE, "timer2",
khz * 1000, 200, 16, clocksource_mmio_readl_down);

View File

@ -16,16 +16,18 @@
#include <mach/gpio.h>
#include <mach/pxa168.h>
#include <mach/mfp-pxa168.h>
#include <mach/mfp-gplugd.h>
#include "common.h"
static unsigned long gplugd_pin_config[] __initdata = {
/* UART3 */
GPIO8_UART3_SOUT,
GPIO9_UART3_SIN,
GPI1O_UART3_CTS,
GPI11_UART3_RTS,
GPIO8_UART3_TXD,
GPIO9_UART3_RXD,
GPIO1O_UART3_CTS,
GPIO11_UART3_RTS,
/* USB OTG PEN */
GPIO18_GPIO,
/* MMC2 */
GPIO28_MMC2_CMD,
@ -109,6 +111,12 @@ static unsigned long gplugd_pin_config[] __initdata = {
GPIO105_CI2C_SDA,
GPIO106_CI2C_SCL,
/* SPI NOR Flash on SSP2 */
GPIO107_SSP2_RXD,
GPIO108_SSP2_TXD,
GPIO110_GPIO, /* SPI_CSn */
GPIO111_SSP2_CLK,
/* Select JTAG */
GPIO109_GPIO,
@ -154,7 +162,7 @@ static void __init select_disp_freq(void)
"frequency\n");
} else {
gpio_direction_output(35, 1);
gpio_free(104);
gpio_free(35);
}
if (unlikely(gpio_request(85, "DISP_FREQ_SEL_2"))) {
@ -162,7 +170,7 @@ static void __init select_disp_freq(void)
"frequency\n");
} else {
gpio_direction_output(85, 0);
gpio_free(104);
gpio_free(85);
}
}

View File

@ -1,52 +0,0 @@
/*
* linux/arch/arm/mach-mmp/include/mach/mfp-gplugd.h
*
* MFP definitions used in gplugD
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __MACH_MFP_GPLUGD_H
#define __MACH_MFP_GPLUGD_H
#include <plat/mfp.h>
#include <mach/mfp.h>
/* UART3 */
#define GPIO8_UART3_SOUT MFP_CFG(GPIO8, AF2)
#define GPIO9_UART3_SIN MFP_CFG(GPIO9, AF2)
#define GPI1O_UART3_CTS MFP_CFG(GPIO10, AF2)
#define GPI11_UART3_RTS MFP_CFG(GPIO11, AF2)
/* MMC2 */
#define GPIO28_MMC2_CMD MFP_CFG_DRV(GPIO28, AF6, FAST)
#define GPIO29_MMC2_CLK MFP_CFG_DRV(GPIO29, AF6, FAST)
#define GPIO30_MMC2_DAT0 MFP_CFG_DRV(GPIO30, AF6, FAST)
#define GPIO31_MMC2_DAT1 MFP_CFG_DRV(GPIO31, AF6, FAST)
#define GPIO32_MMC2_DAT2 MFP_CFG_DRV(GPIO32, AF6, FAST)
#define GPIO33_MMC2_DAT3 MFP_CFG_DRV(GPIO33, AF6, FAST)
/* I2S */
#undef GPIO114_I2S_FRM
#undef GPIO115_I2S_BCLK
#define GPIO114_I2S_FRM MFP_CFG_DRV(GPIO114, AF1, FAST)
#define GPIO115_I2S_BCLK MFP_CFG_DRV(GPIO115, AF1, FAST)
#define GPIO116_I2S_TXD MFP_CFG_DRV(GPIO116, AF1, FAST)
/* MMC4 */
#define GPIO125_MMC4_DAT3 MFP_CFG_DRV(GPIO125, AF7, FAST)
#define GPIO126_MMC4_DAT2 MFP_CFG_DRV(GPIO126, AF7, FAST)
#define GPIO127_MMC4_DAT1 MFP_CFG_DRV(GPIO127, AF7, FAST)
#define GPIO0_2_MMC4_DAT0 MFP_CFG_DRV(GPIO0_2, AF7, FAST)
#define GPIO1_2_MMC4_CMD MFP_CFG_DRV(GPIO1_2, AF7, FAST)
#define GPIO2_2_MMC4_CLK MFP_CFG_DRV(GPIO2_2, AF7, FAST)
/* OTG GPIO */
#define GPIO_USB_OTG_PEN 18
#define GPIO_USB_OIDIR 20
/* Other GPIOs are 35, 84, 85 */
#endif /* __MACH_MFP_GPLUGD_H */

View File

@ -203,6 +203,10 @@
#define GPIO33_CF_nCD2 MFP_CFG(GPIO33, AF3)
/* UART */
#define GPIO8_UART3_TXD MFP_CFG(GPIO8, AF2)
#define GPIO9_UART3_RXD MFP_CFG(GPIO9, AF2)
#define GPIO1O_UART3_CTS MFP_CFG(GPIO10, AF2)
#define GPIO11_UART3_RTS MFP_CFG(GPIO11, AF2)
#define GPIO88_UART2_TXD MFP_CFG(GPIO88, AF2)
#define GPIO89_UART2_RXD MFP_CFG(GPIO89, AF2)
#define GPIO107_UART1_TXD MFP_CFG_DRV(GPIO107, AF1, FAST)
@ -232,6 +236,22 @@
#define GPIO53_MMC1_CD MFP_CFG(GPIO53, AF1)
#define GPIO46_MMC1_WP MFP_CFG(GPIO46, AF1)
/* MMC2 */
#define GPIO28_MMC2_CMD MFP_CFG_DRV(GPIO28, AF6, FAST)
#define GPIO29_MMC2_CLK MFP_CFG_DRV(GPIO29, AF6, FAST)
#define GPIO30_MMC2_DAT0 MFP_CFG_DRV(GPIO30, AF6, FAST)
#define GPIO31_MMC2_DAT1 MFP_CFG_DRV(GPIO31, AF6, FAST)
#define GPIO32_MMC2_DAT2 MFP_CFG_DRV(GPIO32, AF6, FAST)
#define GPIO33_MMC2_DAT3 MFP_CFG_DRV(GPIO33, AF6, FAST)
/* MMC4 */
#define GPIO125_MMC4_DAT3 MFP_CFG_DRV(GPIO125, AF7, FAST)
#define GPIO126_MMC4_DAT2 MFP_CFG_DRV(GPIO126, AF7, FAST)
#define GPIO127_MMC4_DAT1 MFP_CFG_DRV(GPIO127, AF7, FAST)
#define GPIO0_2_MMC4_DAT0 MFP_CFG_DRV(GPIO0_2, AF7, FAST)
#define GPIO1_2_MMC4_CMD MFP_CFG_DRV(GPIO1_2, AF7, FAST)
#define GPIO2_2_MMC4_CLK MFP_CFG_DRV(GPIO2_2, AF7, FAST)
/* LCD */
#define GPIO84_LCD_CS MFP_CFG(GPIO84, AF1)
#define GPIO60_LCD_DD0 MFP_CFG(GPIO60, AF1)
@ -269,11 +289,12 @@
#define GPIO106_CI2C_SCL MFP_CFG(GPIO106, AF1)
/* I2S */
#define GPIO113_I2S_MCLK MFP_CFG(GPIO113,AF6)
#define GPIO114_I2S_FRM MFP_CFG(GPIO114,AF1)
#define GPIO115_I2S_BCLK MFP_CFG(GPIO115,AF1)
#define GPIO116_I2S_RXD MFP_CFG(GPIO116,AF2)
#define GPIO117_I2S_TXD MFP_CFG(GPIO117,AF2)
#define GPIO113_I2S_MCLK MFP_CFG(GPIO113, AF6)
#define GPIO114_I2S_FRM MFP_CFG(GPIO114, AF1)
#define GPIO115_I2S_BCLK MFP_CFG(GPIO115, AF1)
#define GPIO116_I2S_RXD MFP_CFG(GPIO116, AF2)
#define GPIO116_I2S_TXD MFP_CFG(GPIO116, AF1)
#define GPIO117_I2S_TXD MFP_CFG(GPIO117, AF2)
/* PWM */
#define GPIO96_PWM3_OUT MFP_CFG(GPIO96, AF1)
@ -324,4 +345,10 @@
#define GPIO101_MII_MDIO MFP_CFG(GPIO101, AF5)
#define GPIO103_RX_DV MFP_CFG(GPIO103, AF5)
/* SSP2 */
#define GPIO107_SSP2_RXD MFP_CFG(GPIO107, AF4)
#define GPIO108_SSP2_TXD MFP_CFG(GPIO108, AF4)
#define GPIO111_SSP2_CLK MFP_CFG(GPIO111, AF4)
#define GPIO112_SSP2_FRM MFP_CFG(GPIO112, AF4)
#endif /* __ASM_MACH_MFP_PXA168_H */

View File

@ -51,12 +51,12 @@ static inline uint32_t timer_read(void)
{
int delay = 100;
__raw_writel(1, TIMERS_VIRT_BASE + TMR_CVWR(0));
__raw_writel(1, TIMERS_VIRT_BASE + TMR_CVWR(1));
while (delay--)
cpu_relax();
return __raw_readl(TIMERS_VIRT_BASE + TMR_CVWR(0));
return __raw_readl(TIMERS_VIRT_BASE + TMR_CVWR(1));
}
unsigned long long notrace sched_clock(void)
@ -75,28 +75,51 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *c = dev_id;
/* disable and clear pending interrupt status */
__raw_writel(0x0, TIMERS_VIRT_BASE + TMR_IER(0));
__raw_writel(0x1, TIMERS_VIRT_BASE + TMR_ICR(0));
/*
* Clear pending interrupt status.
*/
__raw_writel(0x01, TIMERS_VIRT_BASE + TMR_ICR(0));
/*
* Disable timer 0.
*/
__raw_writel(0x02, TIMERS_VIRT_BASE + TMR_CER);
c->event_handler(c);
return IRQ_HANDLED;
}
static int timer_set_next_event(unsigned long delta,
struct clock_event_device *dev)
{
unsigned long flags, next;
unsigned long flags;
local_irq_save(flags);
/* clear pending interrupt status and enable */
/*
* Disable timer 0.
*/
__raw_writel(0x02, TIMERS_VIRT_BASE + TMR_CER);
/*
* Clear and enable timer match 0 interrupt.
*/
__raw_writel(0x01, TIMERS_VIRT_BASE + TMR_ICR(0));
__raw_writel(0x01, TIMERS_VIRT_BASE + TMR_IER(0));
next = timer_read() + delta;
__raw_writel(next, TIMERS_VIRT_BASE + TMR_TN_MM(0, 0));
/*
* Setup new clockevent timer value.
*/
__raw_writel(delta - 1, TIMERS_VIRT_BASE + TMR_TN_MM(0, 0));
/*
* Enable timer 0.
*/
__raw_writel(0x03, TIMERS_VIRT_BASE + TMR_CER);
local_irq_restore(flags);
return 0;
}
@ -145,23 +168,26 @@ static struct clocksource cksrc = {
static void __init timer_config(void)
{
uint32_t ccr = __raw_readl(TIMERS_VIRT_BASE + TMR_CCR);
uint32_t cer = __raw_readl(TIMERS_VIRT_BASE + TMR_CER);
uint32_t cmr = __raw_readl(TIMERS_VIRT_BASE + TMR_CMR);
__raw_writel(cer & ~0x1, TIMERS_VIRT_BASE + TMR_CER); /* disable */
__raw_writel(0x0, TIMERS_VIRT_BASE + TMR_CER); /* disable */
ccr &= (cpu_is_mmp2()) ? TMR_CCR_CS_0(0) : TMR_CCR_CS_0(3);
ccr &= (cpu_is_mmp2()) ? (TMR_CCR_CS_0(0) | TMR_CCR_CS_1(0)) :
(TMR_CCR_CS_0(3) | TMR_CCR_CS_1(3));
__raw_writel(ccr, TIMERS_VIRT_BASE + TMR_CCR);
/* free-running mode */
__raw_writel(cmr | 0x01, TIMERS_VIRT_BASE + TMR_CMR);
/* set timer 0 to periodic mode, and timer 1 to free-running mode */
__raw_writel(0x2, TIMERS_VIRT_BASE + TMR_CMR);
__raw_writel(0x0, TIMERS_VIRT_BASE + TMR_PLCR(0)); /* free-running */
__raw_writel(0x1, TIMERS_VIRT_BASE + TMR_PLCR(0)); /* periodic */
__raw_writel(0x7, TIMERS_VIRT_BASE + TMR_ICR(0)); /* clear status */
__raw_writel(0x0, TIMERS_VIRT_BASE + TMR_IER(0));
/* enable timer counter */
__raw_writel(cer | 0x01, TIMERS_VIRT_BASE + TMR_CER);
__raw_writel(0x0, TIMERS_VIRT_BASE + TMR_PLCR(1)); /* free-running */
__raw_writel(0x7, TIMERS_VIRT_BASE + TMR_ICR(1)); /* clear status */
__raw_writel(0x0, TIMERS_VIRT_BASE + TMR_IER(1));
/* enable timer 1 counter */
__raw_writel(0x2, TIMERS_VIRT_BASE + TMR_CER);
}
static struct irqaction timer_irq = {

View File

@ -81,7 +81,7 @@ static struct plat_serial8250_port serial_platform_data[] = {
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP,
}, {
.mapbase = (unsigned long)(MX51_CS1_BASE_ADDR + 0x2000000),
.irq = irq_to_gpio(CPUIMX51_QUARTD_GPIO),
.irq = gpio_to_irq(CPUIMX51_QUARTD_GPIO),
.irqflags = IRQF_TRIGGER_HIGH,
.uartclk = CPUIMX51_QUART_XTAL,
.regshift = CPUIMX51_QUART_REGSHIFT,

View File

@ -369,7 +369,7 @@ static void __init mx51_babbage_init(void)
ARRAY_SIZE(mx51babbage_pads));
imx51_add_imx_uart(0, &uart_pdata);
imx51_add_imx_uart(1, &uart_pdata);
imx51_add_imx_uart(1, NULL);
imx51_add_imx_uart(2, &uart_pdata);
babbage_fec_reset();

View File

@ -108,9 +108,9 @@ static void __init mx51_efikamx_board_id(void)
gpio_request(EFIKAMX_PCBID2, "pcbid2");
gpio_direction_input(EFIKAMX_PCBID2);
id = gpio_get_value(EFIKAMX_PCBID0);
id |= gpio_get_value(EFIKAMX_PCBID1) << 1;
id |= gpio_get_value(EFIKAMX_PCBID2) << 2;
id = gpio_get_value(EFIKAMX_PCBID0) ? 1 : 0;
id |= (gpio_get_value(EFIKAMX_PCBID1) ? 1 : 0) << 1;
id |= (gpio_get_value(EFIKAMX_PCBID2) ? 1 : 0) << 2;
switch (id) {
case 7:

View File

@ -156,23 +156,24 @@ static struct gpio_keys_button mx51_efikasb_keys[] = {
{
.code = KEY_POWER,
.gpio = EFIKASB_PWRKEY,
.type = EV_PWR,
.type = EV_KEY,
.desc = "Power Button",
.wakeup = 1,
.debounce_interval = 10, /* ms */
.active_low = 1,
},
{
.code = SW_LID,
.gpio = EFIKASB_LID,
.type = EV_SW,
.desc = "Lid Switch",
.active_low = 1,
},
{
/* SW_RFKILLALL vs KEY_RFKILL ? */
.code = SW_RFKILL_ALL,
.code = KEY_RFKILL,
.gpio = EFIKASB_RFKILL,
.type = EV_SW,
.type = EV_KEY,
.desc = "rfkill",
.active_low = 1,
},
};
@ -224,8 +225,8 @@ static void __init mx51_efikasb_board_id(void)
gpio_request(EFIKASB_PCBID1, "pcb id1");
gpio_direction_input(EFIKASB_PCBID1);
id = gpio_get_value(EFIKASB_PCBID0);
id |= gpio_get_value(EFIKASB_PCBID1) << 1;
id = gpio_get_value(EFIKASB_PCBID0) ? 1 : 0;
id |= (gpio_get_value(EFIKASB_PCBID1) ? 1 : 0) << 1;
switch (id) {
default:

View File

@ -271,7 +271,11 @@ static int _clk_pll_enable(struct clk *clk)
int i = 0;
pllbase = _get_pll_base(clk);
reg = __raw_readl(pllbase + MXC_PLL_DP_CTL) | MXC_PLL_DP_CTL_UPEN;
reg = __raw_readl(pllbase + MXC_PLL_DP_CTL);
if (reg & MXC_PLL_DP_CTL_UPEN)
return 0;
reg |= MXC_PLL_DP_CTL_UPEN;
__raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
/* Wait for lock */

View File

@ -186,7 +186,7 @@ static int initialize_usbh1_port(struct platform_device *pdev)
mdelay(10);
return mx51_initialize_usb_hw(0, MXC_EHCI_ITC_NO_THRESHOLD);
return mx51_initialize_usb_hw(pdev->id, MXC_EHCI_ITC_NO_THRESHOLD);
}
static struct mxc_usbh_platform_data usbh1_config = {

View File

@ -7,7 +7,6 @@ config ARCH_OMAP2PLUS_TYPICAL
default y
select AEABI
select REGULATOR
select PM
select PM_RUNTIME
select VFP
select NEON if ARCH_OMAP3 || ARCH_OMAP4

View File

@ -45,8 +45,6 @@ static struct omap_board_config_kernel am3517_crane_config[] __initdata = {
static struct omap_board_mux board_mux[] __initdata = {
{ .reg_offset = OMAP_MUX_TERMINATOR },
};
#else
#define board_mux NULL
#endif
static void __init am3517_crane_init_early(void)

View File

@ -491,23 +491,22 @@ static void __init beagle_opp_init(void)
/* Custom OPP enabled for all xM versions */
if (cpu_is_omap3630()) {
struct omap_hwmod *mh = omap_hwmod_lookup("mpu");
struct omap_hwmod *dh = omap_hwmod_lookup("iva");
struct device *dev;
struct device *mpu_dev, *iva_dev;
if (!mh || !dh) {
mpu_dev = omap2_get_mpuss_device();
iva_dev = omap2_get_iva_device();
if (!mpu_dev || !iva_dev) {
pr_err("%s: Aiee.. no mpu/dsp devices? %p %p\n",
__func__, mh, dh);
__func__, mpu_dev, iva_dev);
return;
}
/* Enable MPU 1GHz and lower opps */
dev = &mh->od->pdev.dev;
r = opp_enable(dev, 800000000);
r = opp_enable(mpu_dev, 800000000);
/* TODO: MPU 1GHz needs SR and ABB */
/* Enable IVA 800MHz and lower opps */
dev = &dh->od->pdev.dev;
r |= opp_enable(dev, 660000000);
r |= opp_enable(iva_dev, 660000000);
/* TODO: DSP 800MHz needs SR and ABB */
if (r) {
pr_err("%s: failed to enable higher opp %d\n",
@ -516,10 +515,8 @@ static void __init beagle_opp_init(void)
* Cleanup - disable the higher freqs - we dont care
* about the results
*/
dev = &mh->od->pdev.dev;
opp_disable(dev, 800000000);
dev = &dh->od->pdev.dev;
opp_disable(dev, 660000000);
opp_disable(mpu_dev, 800000000);
opp_disable(iva_dev, 660000000);
}
}
return;

View File

@ -3078,6 +3078,7 @@ static struct clk gpt12_fck = {
.name = "gpt12_fck",
.ops = &clkops_null,
.parent = &secure_32k_fck,
.clkdm_name = "wkup_clkdm",
.recalc = &followparent_recalc,
};
@ -3085,6 +3086,7 @@ static struct clk wdt1_fck = {
.name = "wdt1_fck",
.ops = &clkops_null,
.parent = &secure_32k_fck,
.clkdm_name = "wkup_clkdm",
.recalc = &followparent_recalc,
};

View File

@ -3376,10 +3376,18 @@ int __init omap4xxx_clk_init(void)
} else if (cpu_is_omap446x()) {
cpu_mask = RATE_IN_4460;
cpu_clkflg = CK_446X;
} else {
return 0;
}
clk_init(&omap2_clk_functions);
omap2_clk_disable_clkdm_control();
/*
* Must stay commented until all OMAP SoC drivers are
* converted to runtime PM, or drivers may start crashing
*
* omap2_clk_disable_clkdm_control();
*/
for (c = omap44xx_clks; c < omap44xx_clks + ARRAY_SIZE(omap44xx_clks);
c++)

View File

@ -747,6 +747,7 @@ int clkdm_wakeup(struct clockdomain *clkdm)
spin_lock_irqsave(&clkdm->lock, flags);
clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
ret = arch_clkdm->clkdm_wakeup(clkdm);
ret |= pwrdm_state_switch(clkdm->pwrdm.ptr);
spin_unlock_irqrestore(&clkdm->lock, flags);
return ret;
}
@ -818,6 +819,7 @@ void clkdm_deny_idle(struct clockdomain *clkdm)
spin_lock_irqsave(&clkdm->lock, flags);
clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
arch_clkdm->clkdm_deny_idle(clkdm);
pwrdm_state_switch(clkdm->pwrdm.ptr);
spin_unlock_irqrestore(&clkdm->lock, flags);
}

View File

@ -18,13 +18,36 @@ extern void omap4_cminst_clkdm_force_sleep(u8 part, s16 inst, u16 cdoffs);
extern void omap4_cminst_clkdm_force_wakeup(u8 part, s16 inst, u16 cdoffs);
extern int omap4_cminst_wait_module_ready(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_offs);
extern int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_offs);
# ifdef CONFIG_ARCH_OMAP4
extern int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs,
u16 clkctrl_offs);
extern void omap4_cminst_module_enable(u8 mode, u8 part, u16 inst, s16 cdoffs,
u16 clkctrl_offs);
extern void omap4_cminst_module_disable(u8 part, u16 inst, s16 cdoffs,
u16 clkctrl_offs);
# else
static inline int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs,
u16 clkctrl_offs)
{
return 0;
}
static inline void omap4_cminst_module_enable(u8 mode, u8 part, u16 inst,
s16 cdoffs, u16 clkctrl_offs)
{
}
static inline void omap4_cminst_module_disable(u8 part, u16 inst, s16 cdoffs,
u16 clkctrl_offs)
{
}
# endif
/*
* In an ideal world, we would not export these low-level functions,
* but this will probably take some time to fix properly

View File

@ -821,11 +821,10 @@ static void __init omap_mux_set_cmdline_signals(void)
if (!omap_mux_options)
return;
options = kmalloc(strlen(omap_mux_options) + 1, GFP_KERNEL);
options = kstrdup(omap_mux_options, GFP_KERNEL);
if (!options)
return;
strcpy(options, omap_mux_options);
next_opt = options;
while ((token = strsep(&next_opt, ",")) != NULL) {
@ -855,24 +854,19 @@ static int __init omap_mux_copy_names(struct omap_mux *src,
for (i = 0; i < OMAP_MUX_NR_MODES; i++) {
if (src->muxnames[i]) {
dst->muxnames[i] =
kmalloc(strlen(src->muxnames[i]) + 1,
GFP_KERNEL);
dst->muxnames[i] = kstrdup(src->muxnames[i],
GFP_KERNEL);
if (!dst->muxnames[i])
goto free;
strcpy(dst->muxnames[i], src->muxnames[i]);
}
}
#ifdef CONFIG_DEBUG_FS
for (i = 0; i < OMAP_MUX_NR_SIDES; i++) {
if (src->balls[i]) {
dst->balls[i] =
kmalloc(strlen(src->balls[i]) + 1,
GFP_KERNEL);
dst->balls[i] = kstrdup(src->balls[i], GFP_KERNEL);
if (!dst->balls[i])
goto free;
strcpy(dst->balls[i], src->balls[i]);
}
}
#endif

View File

@ -192,6 +192,7 @@ static struct omap_hwmod_addr_space omap2430_usbhsotg_addrs[] = {
.pa_end = OMAP243X_HS_BASE + SZ_4K - 1,
.flags = ADDR_TYPE_RT
},
{ }
};
/* l4_core ->usbhsotg interface */

View File

@ -130,7 +130,6 @@ int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
} else {
hwsup = clkdm_in_hwsup(pwrdm->pwrdm_clkdms[0]);
clkdm_wakeup(pwrdm->pwrdm_clkdms[0]);
pwrdm_wait_transition(pwrdm);
sleep_switch = FORCEWAKEUP_SWITCH;
}
}
@ -156,7 +155,6 @@ int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
return ret;
}
pwrdm_wait_transition(pwrdm);
pwrdm_state_switch(pwrdm);
err:
return ret;

View File

@ -195,28 +195,35 @@ static int _pwrdm_post_transition_cb(struct powerdomain *pwrdm, void *unused)
/**
* pwrdm_init - set up the powerdomain layer
* @pwrdm_list: array of struct powerdomain pointers to register
* @pwrdms: array of struct powerdomain pointers to register
* @custom_funcs: func pointers for arch specific implementations
*
* Loop through the array of powerdomains @pwrdm_list, registering all
* that are available on the current CPU. If pwrdm_list is supplied
* and not null, all of the referenced powerdomains will be
* registered. No return value. XXX pwrdm_list is not really a
* "list"; it is an array. Rename appropriately.
* Loop through the array of powerdomains @pwrdms, registering all
* that are available on the current CPU. Also, program all
* powerdomain target state as ON; this is to prevent domains from
* hitting low power states (if bootloader has target states set to
* something other than ON) and potentially even losing context while
* PM is not fully initialized. The PM late init code can then program
* the desired target state for all the power domains. No return
* value.
*/
void pwrdm_init(struct powerdomain **pwrdm_list, struct pwrdm_ops *custom_funcs)
void pwrdm_init(struct powerdomain **pwrdms, struct pwrdm_ops *custom_funcs)
{
struct powerdomain **p = NULL;
struct powerdomain *temp_p;
if (!custom_funcs)
WARN(1, "powerdomain: No custom pwrdm functions registered\n");
else
arch_pwrdm = custom_funcs;
if (pwrdm_list) {
for (p = pwrdm_list; *p; p++)
if (pwrdms) {
for (p = pwrdms; *p; p++)
_pwrdm_register(*p);
}
list_for_each_entry(temp_p, &pwrdm_list, node)
pwrdm_set_next_pwrst(temp_p, PWRDM_POWER_ON);
}
/**

View File

@ -621,7 +621,7 @@ void sr_disable(struct voltagedomain *voltdm)
sr_v2_disable(sr);
}
pm_runtime_put_sync(&sr->pdev->dev);
pm_runtime_put_sync_suspend(&sr->pdev->dev);
}
/**
@ -860,6 +860,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
pm_runtime_enable(&pdev->dev);
pm_runtime_irq_safe(&pdev->dev);
sr_info->pdev = pdev;
sr_info->srid = pdev->id;

View File

@ -293,7 +293,8 @@ static void __init omap2_gp_clocksource_init(int gptimer_id,
pr_info("OMAP clocksource: GPTIMER%d at %lu Hz\n",
gptimer_id, clksrc.rate);
__omap_dm_timer_load_start(clksrc.io_base, OMAP_TIMER_CTRL_ST, 0, 1);
__omap_dm_timer_load_start(clksrc.io_base,
OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR, 0, 1);
init_sched_clock(&cd, dmtimer_update_sched_clock, 32, clksrc.rate);
if (clocksource_register_hz(&clocksource_gpt, clksrc.rate))

View File

@ -48,14 +48,7 @@ void __init omap_pmic_init(int bus, u32 clkrate,
omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
}
static struct twl4030_usb_data omap4_usb_pdata = {
.phy_init = omap4430_phy_init,
.phy_exit = omap4430_phy_exit,
.phy_power = omap4430_phy_power,
.phy_set_clock = omap4430_phy_set_clk,
.phy_suspend = omap4430_phy_suspend,
};
#if defined(CONFIG_ARCH_OMAP3)
static struct twl4030_usb_data omap3_usb_pdata = {
.usb_mode = T2_USB_MODE_ULPI,
};
@ -122,6 +115,45 @@ static struct regulator_init_data omap3_vpll2_idata = {
.consumer_supplies = omap3_vpll2_supplies,
};
void __init omap3_pmic_get_config(struct twl4030_platform_data *pmic_data,
u32 pdata_flags, u32 regulators_flags)
{
if (!pmic_data->irq_base)
pmic_data->irq_base = TWL4030_IRQ_BASE;
if (!pmic_data->irq_end)
pmic_data->irq_end = TWL4030_IRQ_END;
/* Common platform data configurations */
if (pdata_flags & TWL_COMMON_PDATA_USB && !pmic_data->usb)
pmic_data->usb = &omap3_usb_pdata;
if (pdata_flags & TWL_COMMON_PDATA_BCI && !pmic_data->bci)
pmic_data->bci = &omap3_bci_pdata;
if (pdata_flags & TWL_COMMON_PDATA_MADC && !pmic_data->madc)
pmic_data->madc = &omap3_madc_pdata;
if (pdata_flags & TWL_COMMON_PDATA_AUDIO && !pmic_data->audio)
pmic_data->audio = &omap3_audio_pdata;
/* Common regulator configurations */
if (regulators_flags & TWL_COMMON_REGULATOR_VDAC && !pmic_data->vdac)
pmic_data->vdac = &omap3_vdac_idata;
if (regulators_flags & TWL_COMMON_REGULATOR_VPLL2 && !pmic_data->vpll2)
pmic_data->vpll2 = &omap3_vpll2_idata;
}
#endif /* CONFIG_ARCH_OMAP3 */
#if defined(CONFIG_ARCH_OMAP4)
static struct twl4030_usb_data omap4_usb_pdata = {
.phy_init = omap4430_phy_init,
.phy_exit = omap4430_phy_exit,
.phy_power = omap4430_phy_power,
.phy_set_clock = omap4430_phy_set_clk,
.phy_suspend = omap4430_phy_suspend,
};
static struct regulator_init_data omap4_vdac_idata = {
.constraints = {
.min_uV = 1800000,
@ -273,32 +305,4 @@ void __init omap4_pmic_get_config(struct twl4030_platform_data *pmic_data,
!pmic_data->clk32kg)
pmic_data->clk32kg = &omap4_clk32kg_idata;
}
void __init omap3_pmic_get_config(struct twl4030_platform_data *pmic_data,
u32 pdata_flags, u32 regulators_flags)
{
if (!pmic_data->irq_base)
pmic_data->irq_base = TWL4030_IRQ_BASE;
if (!pmic_data->irq_end)
pmic_data->irq_end = TWL4030_IRQ_END;
/* Common platform data configurations */
if (pdata_flags & TWL_COMMON_PDATA_USB && !pmic_data->usb)
pmic_data->usb = &omap3_usb_pdata;
if (pdata_flags & TWL_COMMON_PDATA_BCI && !pmic_data->bci)
pmic_data->bci = &omap3_bci_pdata;
if (pdata_flags & TWL_COMMON_PDATA_MADC && !pmic_data->madc)
pmic_data->madc = &omap3_madc_pdata;
if (pdata_flags & TWL_COMMON_PDATA_AUDIO && !pmic_data->audio)
pmic_data->audio = &omap3_audio_pdata;
/* Common regulator configurations */
if (regulators_flags & TWL_COMMON_REGULATOR_VDAC && !pmic_data->vdac)
pmic_data->vdac = &omap3_vdac_idata;
if (regulators_flags & TWL_COMMON_REGULATOR_VPLL2 && !pmic_data->vpll2)
pmic_data->vpll2 = &omap3_vpll2_idata;
}
#endif /* CONFIG_ARCH_OMAP4 */

View File

@ -77,7 +77,7 @@ static int __init dns323_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
/*
* Check for devices with hard-wired IRQs.
*/
irq = orion5x_pci_map_irq(const dev, slot, pin);
irq = orion5x_pci_map_irq(dev, slot, pin);
if (irq != -1)
return irq;

View File

@ -14,6 +14,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mbus.h>
#include <video/vga.h>
#include <asm/irq.h>
#include <asm/mach/pci.h>
#include <plat/pcie.h>

View File

@ -481,6 +481,7 @@ static void __init sirfsoc_clk_init(void)
static struct of_device_id clkc_ids[] = {
{ .compatible = "sirf,prima2-clkc" },
{},
};
void __init sirfsoc_of_clk_init(void)

View File

@ -51,6 +51,7 @@ static __init void sirfsoc_irq_init(void)
static struct of_device_id intc_ids[] = {
{ .compatible = "sirf,prima2-intc" },
{},
};
void __init sirfsoc_of_irq_init(void)

View File

@ -19,6 +19,7 @@ static DEFINE_MUTEX(rstc_lock);
static struct of_device_id rstc_ids[] = {
{ .compatible = "sirf,prima2-rstc" },
{},
};
static int __init sirfsoc_of_rstc_init(void)

View File

@ -190,6 +190,7 @@ static void __init sirfsoc_timer_init(void)
static struct of_device_id timer_ids[] = {
{ .compatible = "sirf,prima2-tick" },
{},
};
static void __init sirfsoc_of_timer_map(void)

View File

@ -44,6 +44,7 @@ static inline void arch_reset(char mode, const char *cmd)
*/
if (realview_reset)
realview_reset(mode);
dsb();
}
#endif

View File

@ -16,6 +16,7 @@
#include <linux/suspend.h>
#include <linux/serial_core.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <mach/map.h>
#include <mach/irqs.h>

View File

@ -129,7 +129,7 @@ static int s5p64x0_alloc_gc(void)
}
ct = gc->chip_types;
ct->chip.irq_ack = irq_gc_ack;
ct->chip.irq_ack = irq_gc_ack_set_bit;
ct->chip.irq_mask = irq_gc_mask_set_bit;
ct->chip.irq_unmask = irq_gc_mask_clr_bit;
ct->chip.irq_set_type = s5p64x0_irq_eint_set_type;

View File

@ -88,7 +88,7 @@ static struct sleep_save s5pv210_core_save[] = {
SAVE_ITEM(S3C2410_TCNTO(0)),
};
void s5pv210_cpu_suspend(unsigned long arg)
static int s5pv210_cpu_suspend(unsigned long arg)
{
unsigned long tmp;

View File

@ -28,6 +28,7 @@
#include <asm/mach-types.h>
#include <mach/nanoengine.h>
#include <mach/hardware.h>
static DEFINE_SPINLOCK(nano_lock);

Some files were not shown because too many files have changed in this diff Show More