Merge branch 'drm-intel-fixes' into HEAD
Conflicts:
	drivers/char/agp/intel-agp.c
	drivers/gpu/drm/i915/intel_crt.c

commit e9e5f8e8d3
@@ -46,7 +46,6 @@
<sect1><title>Atomic and pointer manipulation</title>
!Iarch/x86/include/asm/atomic.h
!Iarch/x86/include/asm/unaligned.h
</sect1>

<sect1><title>Delaying, scheduling, and timer routines</title>

@@ -57,7 +57,6 @@
</para>

<sect1><title>String Conversions</title>
!Ilib/vsprintf.c
!Elib/vsprintf.c
</sect1>
<sect1><title>String Manipulation</title>

@@ -1961,6 +1961,12 @@ machines due to caching.
</sect1>
</chapter>

<chapter id="apiref">
<title>Mutex API reference</title>
!Iinclude/linux/mutex.h
!Ekernel/mutex.c
</chapter>

<chapter id="references">
<title>Further reading</title>

@@ -104,4 +104,9 @@
<title>Block IO</title>
!Iinclude/trace/events/block.h
</chapter>

<chapter id="workqueue">
<title>Workqueue</title>
!Iinclude/trace/events/workqueue.h
</chapter>
</book>
@@ -0,0 +1,45 @@
CFQ ioscheduler tunables
========================

slice_idle
----------
This specifies how long CFQ should idle for the next request on certain cfq
queues (for sequential workloads) and service trees (for random workloads)
before the queue is expired and CFQ selects the next queue to dispatch from.

By default slice_idle is a non-zero value. That means by default we idle on
queues/service trees. This can be very helpful on highly seeky media like
single spindle SATA/SAS disks where we can cut down on the overall number of
seeks and see improved throughput.

Setting slice_idle to 0 will remove all idling at the queue/service tree
level and one should see an overall improved throughput on faster storage
devices like multiple SATA/SAS disks in a hardware RAID configuration. The
downside is that the isolation provided from WRITES also goes down and the
notion of IO priority becomes weaker.

So depending on storage and workload, it might be useful to set slice_idle=0.
In general, for SATA/SAS disks and software RAID of SATA/SAS disks, keeping
slice_idle enabled should be useful. For any configuration where there are
multiple spindles behind a single LUN (host-based hardware RAID controller or
storage arrays), setting slice_idle=0 might end up in better throughput and
acceptable latencies.

CFQ IOPS Mode for group scheduling
===================================
Basic CFQ design is to provide priority-based time slices. A higher priority
process gets a bigger time slice and a lower priority process gets a smaller
time slice. Measuring time becomes harder if storage is fast and supports NCQ,
and it would be better to dispatch multiple requests from multiple cfq queues
in the request queue at a time. In such a scenario, it is not possible to
measure the time consumed by a single queue accurately.

What is possible, though, is to measure the number of requests dispatched
from a single queue and also allow dispatch from multiple cfq queues at the
same time. This effectively becomes fairness in terms of IOPS (IO operations
per second).

If one sets slice_idle=0 and the storage supports NCQ, CFQ internally switches
to IOPS mode and starts providing fairness in terms of number of requests
dispatched. Note that this mode switching takes effect only for group
scheduling. For non-cgroup users nothing should change.
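
As a rough usage sketch (not part of the original document), the snippet below
switches a disk to IOPS mode by writing 0 to slice_idle. The disk name "sda"
and the use of a small C helper instead of a plain shell echo are assumptions
made purely for illustration.

	/* cfq_iops_mode.c - set slice_idle=0 for an assumed disk "sda" */
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		const char *path = "/sys/block/sda/queue/iosched/slice_idle";
		FILE *f = fopen(path, "w");

		if (!f) {
			perror(path);
			return EXIT_FAILURE;
		}
		fprintf(f, "0\n");	/* 0 disables idling; CFQ falls back to IOPS fairness */
		fclose(f);
		return EXIT_SUCCESS;
	}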

@@ -217,6 +217,7 @@ Details of cgroup files
CFQ sysfs tunable
=================
/sys/block/<disk>/queue/iosched/group_isolation
-----------------------------------------------

If group_isolation=1, it provides stronger isolation between groups at the
expense of throughput. By default group_isolation is 0. In general that

@@ -243,6 +244,33 @@ By default one should run with group_isolation=0. If that is not sufficient
and one wants stronger isolation between groups, then set group_isolation=1,
but this will come at the cost of reduced throughput.

/sys/block/<disk>/queue/iosched/slice_idle
------------------------------------------
On faster hardware CFQ can be slow, especially with sequential workloads.
This happens because CFQ idles on a single queue, and a single queue might not
drive deep enough request queue depths to keep the storage busy. In such
scenarios one can try setting slice_idle=0, which switches CFQ to IOPS
(IO operations per second) mode on NCQ-supporting hardware.

That means CFQ will not idle between cfq queues of a cfq group and hence be
able to drive higher queue depths and achieve better throughput. That also
means that cfq provides fairness among groups in terms of IOPS and not in
terms of disk time.

/sys/block/<disk>/queue/iosched/group_idle
------------------------------------------
If one disables idling on individual cfq queues and cfq service trees by
setting slice_idle=0, group_idle kicks in. That means CFQ will still idle
on the group in an attempt to provide fairness among groups.

By default group_idle is the same as slice_idle and does not do anything if
slice_idle is enabled.

One can experience an overall throughput drop if multiple groups have been
created and the applications in those groups are not driving enough IO to
keep the disk busy. In that case set group_idle=0, and CFQ will not idle on
individual groups and throughput should improve.

What works
==========
- Currently only sync IO queues are supported. All the buffered writes are
@@ -109,17 +109,19 @@ use numbers 2000-2063 to identify GPIOs in a bank of I2C GPIO expanders.

If you want to initialize a structure with an invalid GPIO number, use
some negative number (perhaps "-EINVAL"); that will never be valid. To
test if a number could reference a GPIO, you may use this predicate:
test if such number from such a structure could reference a GPIO, you
may use this predicate:

	int gpio_is_valid(int number);

A number that's not valid will be rejected by calls which may request
or free GPIOs (see below). Other numbers may also be rejected; for
example, a number might be valid but unused on a given board.

Whether a platform supports multiple GPIO controllers is currently a
platform-specific implementation issue.
example, a number might be valid but temporarily unused on a given board.

Whether a platform supports multiple GPIO controllers is a platform-specific
implementation issue, as are whether that support can leave "holes" in the space
of GPIO numbers, and whether new controllers can be added at runtime. Such issues
can affect things including whether adjacent GPIO numbers are both valid.
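
As a minimal sketch of the predicate in use (the function and label names
below are hypothetical, not taken from this document), a driver that is
handed an optional GPIO number might do:

	#include <linux/gpio.h>

	/* foo_setup_reset_gpio() is an assumed example helper. */
	static int foo_setup_reset_gpio(int gpio)
	{
		int err;

		if (!gpio_is_valid(gpio))
			return 0;	/* board wired no reset line; not an error */

		err = gpio_request(gpio, "foo-reset");
		if (err)
			return err;

		return gpio_direction_output(gpio, 0);
	}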

Using GPIOs
-----------

@@ -480,12 +482,16 @@ To support this framework, a platform's Kconfig will "select" either
ARCH_REQUIRE_GPIOLIB or ARCH_WANT_OPTIONAL_GPIOLIB
and arrange that its <asm/gpio.h> includes <asm-generic/gpio.h> and defines
three functions: gpio_get_value(), gpio_set_value(), and gpio_cansleep().
They may also want to provide a custom value for ARCH_NR_GPIOS.

ARCH_REQUIRE_GPIOLIB means that the gpio-lib code will always get compiled
It may also provide a custom value for ARCH_NR_GPIOS, so that it better
reflects the number of GPIOs in actual use on that platform, without
wasting static table space. (It should count both built-in/SoC GPIOs and
also ones on GPIO expanders.)

ARCH_REQUIRE_GPIOLIB means that the gpiolib code will always get compiled
into the kernel on that architecture.

ARCH_WANT_OPTIONAL_GPIOLIB means the gpio-lib code defaults to off and the user
ARCH_WANT_OPTIONAL_GPIOLIB means the gpiolib code defaults to off and the user
can enable it and build it into the kernel optionally.

If neither of these options are selected, the platform does not support

@@ -91,12 +91,11 @@ name The chip name.
		I2C devices get this attribute created automatically.
		RO

update_rate	The rate at which the chip will update readings.
update_interval	The interval at which the chip will update readings.
		Unit: millisecond
		RW
		Some devices have a variable update rate. This attribute
		can be used to change the update rate to the desired
		frequency.
		Some devices have a variable update rate or interval.
		This attribute can be used to change it to the desired value.


************

@@ -345,5 +345,10 @@ documentation, in <filename>, for the functions listed.
section titled <section title> from <filename>.
Spaces are allowed in <section title>; do not quote the <section title>.

!C<filename> is replaced by nothing, but makes the tools check that
all DOC: sections and documented functions, symbols, etc. are used.
This makes sense to use when you use !F/!P only and want to verify
that all documentation is included.

Tim.
*/ <twaugh@redhat.com>

@@ -1974,15 +1974,18 @@ and is between 256 and 4096 characters. It is defined in the file
		force	Enable ASPM even on devices that claim not to support it.
			WARNING: Forcing ASPM on may cause system lockups.

	pcie_ports=	[PCIE] PCIe ports handling:
		auto	Ask the BIOS whether or not to use native PCIe services
			associated with PCIe ports (PME, hot-plug, AER). Use
			them only if that is allowed by the BIOS.
		native	Use native PCIe services associated with PCIe ports
			unconditionally.
		compat	Treat PCIe ports as PCI-to-PCI bridges, disable the PCIe
			ports driver.

	pcie_pme=	[PCIE,PM] Native PCIe PME signaling options:
			Format: {auto|force}[,nomsi]
		auto	Use native PCIe PME signaling if the BIOS allows the
			kernel to control PCIe config registers of root ports.
		force	Use native PCIe PME signaling even if the BIOS refuses
			to allow the kernel to control the relevant PCIe config
			registers.
		nomsi	Do not use MSI for native PCIe PME signaling (this makes
			all PCIe root ports use INTx for everything).
			all PCIe root ports use INTx for all services).

	pcmv=		[HW,PCMCIA] BadgePAD 4

@@ -9,7 +9,7 @@ firstly, there's nothing wrong with semaphores. But if the simpler
mutex semantics are sufficient for your code, then there are a couple
of advantages of mutexes:

 - 'struct mutex' is smaller on most architectures: .e.g on x86,
 - 'struct mutex' is smaller on most architectures: E.g. on x86,
   'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes.
   A smaller structure size means less RAM footprint, and better
   CPU-cache utilization.

@@ -136,3 +136,4 @@ the APIs of 'struct mutex' have been streamlined:
 void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
 int mutex_lock_interruptible_nested(struct mutex *lock,
                                     unsigned int subclass);
 int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
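
For context, a minimal sketch of the plain lock/unlock pattern that these
APIs build on; the lock and counter names are illustrative only and do not
come from this document.

	#include <linux/mutex.h>

	static DEFINE_MUTEX(foo_lock);	/* statically initialized mutex */
	static int foo_count;		/* example state protected by foo_lock */

	static void foo_inc(void)
	{
		mutex_lock(&foo_lock);	/* may sleep, so never from interrupt context */
		foo_count++;
		mutex_unlock(&foo_lock);
	}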

@@ -13,7 +13,7 @@ regulators (where voltage output is controllable) and current sinks (where
current limit is controllable).

(C) 2008 Wolfson Microelectronics PLC.
Author: Liam Girdwood <lg@opensource.wolfsonmicro.com>
Author: Liam Girdwood <lrg@slimlogic.co.uk>


Nomenclature

@@ -296,6 +296,7 @@ Conexant 5051
Conexant 5066
=============
  laptop	Basic Laptop config (default)
  hp-laptop	HP laptops, e.g. G60
  dell-laptop	Dell laptops
  dell-vostro	Dell Vostro
  olpc-xo-1_5	OLPC XO 1.5

@@ -0,0 +1,380 @@

Concurrency Managed Workqueue (cmwq)

September, 2010		Tejun Heo <tj@kernel.org>
			Florian Mickler <florian@mickler.org>

CONTENTS

1. Introduction
2. Why cmwq?
3. The Design
4. Application Programming Interface (API)
5. Example Execution Scenarios
6. Guidelines


1. Introduction

There are many cases where an asynchronous process execution context
is needed and the workqueue (wq) API is the most commonly used
mechanism for such cases.

When such an asynchronous execution context is needed, a work item
describing which function to execute is put on a queue. An
independent thread serves as the asynchronous execution context. The
queue is called workqueue and the thread is called worker.

While there are work items on the workqueue the worker executes the
functions associated with the work items one after the other. When
there is no work item left on the workqueue the worker becomes idle.
When a new work item gets queued, the worker begins executing again.


2. Why cmwq?

In the original wq implementation, a multi threaded (MT) wq had one
worker thread per CPU and a single threaded (ST) wq had one worker
thread system-wide. A single MT wq needed to keep around the same
number of workers as the number of CPUs. The kernel grew a lot of MT
wq users over the years and with the number of CPU cores continuously
rising, some systems saturated the default 32k PID space just booting
up.

Although MT wq wasted a lot of resources, the level of concurrency
provided was unsatisfactory. The limitation was common to both ST and
MT wq albeit less severe on MT. Each wq maintained its own separate
worker pool. An MT wq could provide only one execution context per CPU
while an ST wq one for the whole system. Work items had to compete for
those very limited execution contexts leading to various problems
including proneness to deadlocks around the single execution context.

The tension between the provided level of concurrency and resource
usage also forced its users to make unnecessary tradeoffs like libata
choosing to use ST wq for polling PIOs and accepting an unnecessary
limitation that no two polling PIOs can progress at the same time. As
MT wq don't provide much better concurrency, users which require a
higher level of concurrency, like async or fscache, had to implement
their own thread pool.

Concurrency Managed Workqueue (cmwq) is a reimplementation of wq with
focus on the following goals.

* Maintain compatibility with the original workqueue API.

* Use per-CPU unified worker pools shared by all wq to provide a
  flexible level of concurrency on demand without wasting a lot of
  resources.

* Automatically regulate the worker pool and level of concurrency so
  that the API users don't need to worry about such details.


3. The Design

In order to ease the asynchronous execution of functions a new
abstraction, the work item, is introduced.

A work item is a simple struct that holds a pointer to the function
that is to be executed asynchronously. Whenever a driver or subsystem
wants a function to be executed asynchronously it has to set up a work
item pointing to that function and queue that work item on a
workqueue.
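
As a minimal sketch (the "frob" identifiers are made up for illustration),
a user of the API embeds a work item, points it at a function and queues it;
a worker then runs the function later in process context:

	#include <linux/workqueue.h>

	static void frob_work_func(struct work_struct *work)
	{
		/* executed later by a worker thread */
	}

	static DECLARE_WORK(frob_work, frob_work_func);

	static void frob_trigger(void)
	{
		schedule_work(&frob_work);	/* queue on the default system workqueue */
	}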

Special purpose threads, called worker threads, execute the functions
off of the queue, one after the other. If no work is queued, the
worker threads become idle. These worker threads are managed in so
called thread-pools.

The cmwq design differentiates between the user-facing workqueues that
subsystems and drivers queue work items on and the backend mechanism
which manages thread-pools and processes the queued work items.

The backend is called gcwq. There is one gcwq for each possible CPU
and one gcwq to serve work items queued on unbound workqueues.

Subsystems and drivers can create and queue work items through special
workqueue API functions as they see fit. They can influence some
aspects of the way the work items are executed by setting flags on the
workqueue they are putting the work item on. These flags include
things like CPU locality, reentrancy, concurrency limits and more. To
get a detailed overview refer to the API description of
alloc_workqueue() below.

When a work item is queued to a workqueue, the target gcwq is
determined according to the queue parameters and workqueue attributes
and appended on the shared worklist of the gcwq. For example, unless
specifically overridden, a work item of a bound workqueue will be
queued on the worklist of exactly that gcwq that is associated to the
CPU the issuer is running on.

For any worker pool implementation, managing the concurrency level
(how many execution contexts are active) is an important issue. cmwq
tries to keep the concurrency at a minimal but sufficient level.
Minimal to save resources and sufficient in that the system is used at
its full capacity.

Each gcwq bound to an actual CPU implements concurrency management by
hooking into the scheduler. The gcwq is notified whenever an active
worker wakes up or sleeps and keeps track of the number of the
currently runnable workers. Generally, work items are not expected to
hog a CPU and consume many cycles. That means maintaining just enough
concurrency to prevent work processing from stalling should be
optimal. As long as there are one or more runnable workers on the
CPU, the gcwq doesn't start execution of a new work, but, when the
last running worker goes to sleep, it immediately schedules a new
worker so that the CPU doesn't sit idle while there are pending work
items. This allows using a minimal number of workers without losing
execution bandwidth.

Keeping idle workers around doesn't cost anything other than the
memory space for kthreads, so cmwq holds onto idle ones for a while
before killing them.

For an unbound wq, the above concurrency management doesn't apply and
the gcwq for the pseudo unbound CPU tries to start executing all work
items as soon as possible. The responsibility of regulating the
concurrency level is on the users. There is also a flag to mark a
bound wq to ignore the concurrency management. Please refer to the
API section for details.

The forward progress guarantee relies on workers being creatable when
more execution contexts are necessary, which in turn is guaranteed
through the use of rescue workers. All work items which might be used
on code paths that handle memory reclaim are required to be queued on
wq's that have a rescue-worker reserved for execution under memory
pressure. Otherwise it is possible that the thread-pool deadlocks
waiting for execution contexts to free up.


4. Application Programming Interface (API)

alloc_workqueue() allocates a wq. The original create_*workqueue()
functions are deprecated and scheduled for removal. alloc_workqueue()
takes three arguments - @name, @flags and @max_active. @name is the
name of the wq and is also used as the name of the rescuer thread if
there is one.

A wq no longer manages execution resources but serves as a domain for
forward progress guarantee, flush and work item attributes. @flags
and @max_active control how work items are assigned execution
resources, scheduled and executed.

@flags:

  WQ_NON_REENTRANT

	By default, a wq guarantees non-reentrance only on the same
	CPU. A work item may not be executed concurrently on the same
	CPU by multiple workers but is allowed to be executed
	concurrently on multiple CPUs. This flag makes sure
	non-reentrance is enforced across all CPUs. Work items queued
	to a non-reentrant wq are guaranteed to be executed by at most
	one worker system-wide at any given time.

  WQ_UNBOUND

	Work items queued to an unbound wq are served by a special
	gcwq which hosts workers which are not bound to any specific
	CPU. This makes the wq behave as a simple execution context
	provider without concurrency management. The unbound gcwq
	tries to start execution of work items as soon as possible.
	Unbound wq sacrifices locality but is useful for the following
	cases.

	* Wide fluctuation in the concurrency level requirement is
	  expected and using a bound wq may end up creating a large
	  number of mostly unused workers across different CPUs as the
	  issuer hops through different CPUs.

	* Long running CPU intensive workloads which can be better
	  managed by the system scheduler.

  WQ_FREEZEABLE

	A freezeable wq participates in the freeze phase of the system
	suspend operations. Work items on the wq are drained and no
	new work item starts execution until thawed.

  WQ_RESCUER

	All wq which might be used in the memory reclaim paths _MUST_
	have this flag set. This reserves one worker exclusively for
	the execution of this wq under memory pressure.

  WQ_HIGHPRI

	Work items of a highpri wq are queued at the head of the
	worklist of the target gcwq and start execution regardless of
	the current concurrency level. In other words, highpri work
	items will always start execution as soon as an execution
	resource is available.

	Ordering among highpri work items is preserved - a highpri
	work item queued after another highpri work item will start
	execution after the earlier highpri work item starts.

	Although highpri work items are not held back by other
	runnable work items, they still contribute to the concurrency
	level. Highpri work items in runnable state will prevent
	non-highpri work items from starting execution.

	This flag is meaningless for unbound wq.

  WQ_CPU_INTENSIVE

	Work items of a CPU intensive wq do not contribute to the
	concurrency level. In other words, runnable CPU intensive
	work items will not prevent other work items from starting
	execution. This is useful for bound work items which are
	expected to hog CPU cycles so that their execution is
	regulated by the system scheduler.

	Although CPU intensive work items don't contribute to the
	concurrency level, start of their execution is still
	regulated by the concurrency management and runnable
	non-CPU-intensive work items can delay execution of CPU
	intensive work items.

	This flag is meaningless for unbound wq.

  WQ_HIGHPRI | WQ_CPU_INTENSIVE

	This combination makes the wq avoid interaction with
	concurrency management completely and behave as a simple
	per-CPU execution context provider. Work items queued on a
	highpri CPU-intensive wq start execution as soon as resources
	are available and don't affect execution of other work items.

@max_active:

@max_active determines the maximum number of execution contexts per
CPU which can be assigned to the work items of a wq. For example,
with @max_active of 16, at most 16 work items of the wq can be
executing at the same time per CPU.

Currently, for a bound wq, the maximum limit for @max_active is 512
and the default value used when 0 is specified is 256. For an unbound
wq, the limit is the higher of 512 and 4 * num_possible_cpus(). These
values are chosen sufficiently high such that they are not the
limiting factor while providing protection in runaway cases.

The number of active work items of a wq is usually regulated by the
users of the wq, more specifically, by how many work items the users
may queue at the same time. Unless there is a specific need for
throttling the number of active work items, specifying '0' is
recommended.

Some users depend on the strict execution ordering of ST wq. The
combination of @max_active of 1 and WQ_UNBOUND is used to achieve this
behavior. Work items on such a wq are always queued to the unbound gcwq
and only one work item can be active at any given time, thus achieving
the same ordering property as ST wq.
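
Putting the pieces above together, here is a minimal allocation sketch; the
workqueue name "frob", the choice of WQ_RESCUER and the work function are
assumptions for illustration, not taken from this document.

	#include <linux/errno.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *frob_wq;
	static struct work_struct frob_work;

	static void frob_work_func(struct work_struct *work)
	{
		/* may run under memory pressure, hence WQ_RESCUER below */
	}

	static int frob_init(void)
	{
		/* @max_active == 0 selects the default limit described above */
		frob_wq = alloc_workqueue("frob", WQ_RESCUER, 0);
		if (!frob_wq)
			return -ENOMEM;

		INIT_WORK(&frob_work, frob_work_func);
		queue_work(frob_wq, &frob_work);
		return 0;
	}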

5. Example Execution Scenarios

The following example execution scenarios try to illustrate how cmwq
behaves under different configurations.

Work items w0, w1, w2 are queued to a bound wq q0 on the same CPU.
w0 burns CPU for 5ms then sleeps for 10ms then burns CPU for 5ms
again before finishing. w1 and w2 burn CPU for 5ms then sleep for
10ms.

Ignoring all other tasks, works and processing overhead, and assuming
simple FIFO scheduling, the following is one highly simplified version
of possible sequences of events with the original wq.

 TIME IN MSECS	EVENT
 0		w0 starts and burns CPU
 5		w0 sleeps
 15		w0 wakes up and burns CPU
 20		w0 finishes
 20		w1 starts and burns CPU
 25		w1 sleeps
 35		w1 wakes up and finishes
 35		w2 starts and burns CPU
 40		w2 sleeps
 50		w2 wakes up and finishes

And with cmwq with @max_active >= 3,

 TIME IN MSECS	EVENT
 0		w0 starts and burns CPU
 5		w0 sleeps
 5		w1 starts and burns CPU
 10		w1 sleeps
 10		w2 starts and burns CPU
 15		w2 sleeps
 15		w0 wakes up and burns CPU
 20		w0 finishes
 20		w1 wakes up and finishes
 25		w2 wakes up and finishes

If @max_active == 2,

 TIME IN MSECS	EVENT
 0		w0 starts and burns CPU
 5		w0 sleeps
 5		w1 starts and burns CPU
 10		w1 sleeps
 15		w0 wakes up and burns CPU
 20		w0 finishes
 20		w1 wakes up and finishes
 20		w2 starts and burns CPU
 25		w2 sleeps
 35		w2 wakes up and finishes

Now, let's assume w1 and w2 are queued to a different wq q1 which has
WQ_HIGHPRI set,

 TIME IN MSECS	EVENT
 0		w1 and w2 start and burn CPU
 5		w1 sleeps
 10		w2 sleeps
 10		w0 starts and burns CPU
 15		w0 sleeps
 15		w1 wakes up and finishes
 20		w2 wakes up and finishes
 25		w0 wakes up and burns CPU
 30		w0 finishes

If q1 has WQ_CPU_INTENSIVE set,

 TIME IN MSECS	EVENT
 0		w0 starts and burns CPU
 5		w0 sleeps
 5		w1 and w2 start and burn CPU
 10		w1 sleeps
 15		w2 sleeps
 15		w0 wakes up and burns CPU
 20		w0 finishes
 20		w1 wakes up and finishes
 25		w2 wakes up and finishes


6. Guidelines

* Do not forget to use WQ_RESCUER if a wq may process work items which
  are used during memory reclaim. Each wq with WQ_RESCUER set has one
  rescuer thread reserved for it. If there is dependency among
  multiple work items used during memory reclaim, they should be
  queued to separate wq each with WQ_RESCUER.

* Unless strict ordering is required, there is no need to use ST wq.

* Unless there is a specific need, using 0 for @max_active is
  recommended. In most use cases, the concurrency level usually stays
  well under the default limit.

* A wq serves as a domain for forward progress guarantee (WQ_RESCUER),
  flush and work item attributes. Work items which are not involved
  in memory reclaim and don't need to be flushed as a part of a group
  of work items, and don't require any special attribute, can use one
  of the system wq. There is no difference in execution
  characteristics between using a dedicated wq and a system wq.

* Unless work items are expected to consume a huge amount of CPU
  cycles, using a bound wq is usually beneficial due to the increased
  level of locality in wq operations and work item execution.
MAINTAINERS
|
@ -1135,7 +1135,7 @@ ATLX ETHERNET DRIVERS
|
|||
M: Jay Cliburn <jcliburn@gmail.com>
|
||||
M: Chris Snook <chris.snook@gmail.com>
|
||||
M: Jie Yang <jie.yang@atheros.com>
|
||||
L: atl1-devel@lists.sourceforge.net
|
||||
L: netdev@vger.kernel.org
|
||||
W: http://sourceforge.net/projects/atl1
|
||||
W: http://atl1.sourceforge.net
|
||||
S: Maintained
|
||||
|
@ -1445,6 +1445,16 @@ S: Maintained
|
|||
F: Documentation/video4linux/cafe_ccic
|
||||
F: drivers/media/video/cafe_ccic*
|
||||
|
||||
CAIF NETWORK LAYER
|
||||
M: Sjur Braendeland <sjur.brandeland@stericsson.com>
|
||||
L: netdev@vger.kernel.org
|
||||
S: Supported
|
||||
F: Documentation/networking/caif/
|
||||
F: drivers/net/caif/
|
||||
F: include/linux/caif/
|
||||
F: include/net/caif/
|
||||
F: net/caif/
|
||||
|
||||
CALGARY x86-64 IOMMU
|
||||
M: Muli Ben-Yehuda <muli@il.ibm.com>
|
||||
M: "Jon D. Mason" <jdmason@kudzu.us>
|
||||
|
@ -2201,6 +2211,12 @@ L: linux-rdma@vger.kernel.org
|
|||
S: Supported
|
||||
F: drivers/infiniband/hw/ehca/
|
||||
|
||||
EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
|
||||
M: Breno Leitao <leitao@linux.vnet.ibm.com>
|
||||
L: netdev@vger.kernel.org
|
||||
S: Maintained
|
||||
F: drivers/net/ehea/
|
||||
|
||||
EMBEDDED LINUX
|
||||
M: Paul Gortmaker <paul.gortmaker@windriver.com>
|
||||
M: Matt Mackall <mpm@selenic.com>
|
||||
|
@ -2641,9 +2657,12 @@ S: Maintained
|
|||
F: drivers/media/video/gspca/
|
||||
|
||||
HARDWARE MONITORING
|
||||
M: Jean Delvare <khali@linux-fr.org>
|
||||
M: Guenter Roeck <guenter.roeck@ericsson.com>
|
||||
L: lm-sensors@lm-sensors.org
|
||||
W: http://www.lm-sensors.org/
|
||||
S: Orphan
|
||||
T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
|
||||
S: Maintained
|
||||
F: Documentation/hwmon/
|
||||
F: drivers/hwmon/
|
||||
F: include/linux/hwmon*.h
|
||||
|
@ -2781,11 +2800,6 @@ S: Maintained
|
|||
F: arch/x86/kernel/hpet.c
|
||||
F: arch/x86/include/asm/hpet.h
|
||||
|
||||
HPET: ACPI
|
||||
M: Bob Picco <bob.picco@hp.com>
|
||||
S: Maintained
|
||||
F: drivers/char/hpet.c
|
||||
|
||||
HPFS FILESYSTEM
|
||||
M: Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz>
|
||||
W: http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi
|
||||
|
@ -3398,7 +3412,7 @@ F: drivers/s390/kvm/
|
|||
|
||||
KEXEC
|
||||
M: Eric Biederman <ebiederm@xmission.com>
|
||||
W: http://ftp.kernel.org/pub/linux/kernel/people/horms/kexec-tools/
|
||||
W: http://kernel.org/pub/linux/utils/kernel/kexec/
|
||||
L: kexec@lists.infradead.org
|
||||
S: Maintained
|
||||
F: include/linux/kexec.h
|
||||
|
@ -3923,13 +3937,12 @@ F: Documentation/sound/oss/MultiSound
|
|||
F: sound/oss/msnd*
|
||||
|
||||
MULTITECH MULTIPORT CARD (ISICOM)
|
||||
M: Jiri Slaby <jirislaby@gmail.com>
|
||||
S: Maintained
|
||||
S: Orphan
|
||||
F: drivers/char/isicom.c
|
||||
F: include/linux/isicom.h
|
||||
|
||||
MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
|
||||
M: Felipe Balbi <felipe.balbi@nokia.com>
|
||||
M: Felipe Balbi <balbi@ti.com>
|
||||
L: linux-usb@vger.kernel.org
|
||||
T: git git://gitorious.org/usb/usb.git
|
||||
S: Maintained
|
||||
|
@ -4227,7 +4240,7 @@ S: Maintained
|
|||
F: drivers/char/hw_random/omap-rng.c
|
||||
|
||||
OMAP USB SUPPORT
|
||||
M: Felipe Balbi <felipe.balbi@nokia.com>
|
||||
M: Felipe Balbi <balbi@ti.com>
|
||||
M: David Brownell <dbrownell@users.sourceforge.net>
|
||||
L: linux-usb@vger.kernel.org
|
||||
L: linux-omap@vger.kernel.org
|
||||
|
@ -4604,7 +4617,7 @@ F: include/linux/preempt.h
|
|||
PRISM54 WIRELESS DRIVER
|
||||
M: "Luis R. Rodriguez" <mcgrof@gmail.com>
|
||||
L: linux-wireless@vger.kernel.org
|
||||
W: http://prism54.org
|
||||
W: http://wireless.kernel.org/en/users/Drivers/p54
|
||||
S: Obsolete
|
||||
F: drivers/net/wireless/prism54/
|
||||
|
||||
|
@ -4805,6 +4818,7 @@ RCUTORTURE MODULE
|
|||
M: Josh Triplett <josh@freedesktop.org>
|
||||
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
|
||||
S: Supported
|
||||
T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
|
||||
F: Documentation/RCU/torture.txt
|
||||
F: kernel/rcutorture.c
|
||||
|
||||
|
@ -4829,6 +4843,7 @@ M: Dipankar Sarma <dipankar@in.ibm.com>
|
|||
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
|
||||
W: http://www.rdrop.com/users/paulmck/rclock/
|
||||
S: Supported
|
||||
T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
|
||||
F: Documentation/RCU/
|
||||
F: include/linux/rcu*
|
||||
F: include/linux/srcu*
|
||||
|
@ -4836,12 +4851,10 @@ F: kernel/rcu*
|
|||
F: kernel/srcu*
|
||||
X: kernel/rcutorture.c
|
||||
|
||||
REAL TIME CLOCK DRIVER
|
||||
REAL TIME CLOCK DRIVER (LEGACY)
|
||||
M: Paul Gortmaker <p_gortmaker@yahoo.com>
|
||||
S: Maintained
|
||||
F: Documentation/rtc.txt
|
||||
F: drivers/rtc/
|
||||
F: include/linux/rtc.h
|
||||
F: drivers/char/rtc.c
|
||||
|
||||
REAL TIME CLOCK (RTC) SUBSYSTEM
|
||||
M: Alessandro Zummo <a.zummo@towertech.it>
|
||||
|
|
Makefile
|
@ -1,7 +1,7 @@
|
|||
VERSION = 2
|
||||
PATCHLEVEL = 6
|
||||
SUBLEVEL = 36
|
||||
EXTRAVERSION = -rc3
|
||||
EXTRAVERSION = -rc5
|
||||
NAME = Sheep on Meth
|
||||
|
||||
# *DOCUMENTATION*
|
||||
|
|
|
@ -17,7 +17,6 @@
|
|||
# define L1_CACHE_SHIFT 5
|
||||
#endif
|
||||
|
||||
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
|
||||
#define SMP_CACHE_BYTES L1_CACHE_BYTES
|
||||
|
||||
#endif
|
||||
|
|
|
@ -43,6 +43,8 @@ extern void smp_imb(void);
|
|||
/* ??? Ought to use this in arch/alpha/kernel/signal.c too. */
|
||||
|
||||
#ifndef CONFIG_SMP
|
||||
#include <linux/sched.h>
|
||||
|
||||
extern void __load_new_mm_context(struct mm_struct *);
|
||||
static inline void
|
||||
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
|
||||
|
|
|
@ -449,10 +449,13 @@
|
|||
#define __NR_pwritev 491
|
||||
#define __NR_rt_tgsigqueueinfo 492
|
||||
#define __NR_perf_event_open 493
|
||||
#define __NR_fanotify_init 494
|
||||
#define __NR_fanotify_mark 495
|
||||
#define __NR_prlimit64 496
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#define NR_SYSCALLS 494
|
||||
#define NR_SYSCALLS 497
|
||||
|
||||
#define __ARCH_WANT_IPC_PARSE_VERSION
|
||||
#define __ARCH_WANT_OLD_READDIR
|
||||
|
@ -463,6 +466,7 @@
|
|||
#define __ARCH_WANT_SYS_OLD_GETRLIMIT
|
||||
#define __ARCH_WANT_SYS_OLDUMOUNT
|
||||
#define __ARCH_WANT_SYS_SIGPENDING
|
||||
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
|
||||
|
||||
/* "Conditional" syscalls. What we want is
|
||||
|
||||
|
|
|
@ -317,14 +317,14 @@ ret_from_sys_call:
|
|||
ldq $0, SP_OFF($sp)
|
||||
and $0, 8, $0
|
||||
beq $0, restore_all
|
||||
ret_from_reschedule:
|
||||
ret_to_user:
|
||||
/* Make sure need_resched and sigpending don't change between
|
||||
sampling and the rti. */
|
||||
lda $16, 7
|
||||
call_pal PAL_swpipl
|
||||
ldl $5, TI_FLAGS($8)
|
||||
and $5, _TIF_WORK_MASK, $2
|
||||
bne $5, work_pending
|
||||
bne $2, work_pending
|
||||
restore_all:
|
||||
RESTORE_ALL
|
||||
call_pal PAL_rti
|
||||
|
@ -363,7 +363,7 @@ $ret_success:
|
|||
* $8: current.
|
||||
* $19: The old syscall number, or zero if this is not a return
|
||||
* from a syscall that errored and is possibly restartable.
|
||||
* $20: Error indication.
|
||||
* $20: The old a3 value
|
||||
*/
|
||||
|
||||
.align 4
|
||||
|
@ -392,12 +392,18 @@ $work_resched:
|
|||
|
||||
$work_notifysig:
|
||||
mov $sp, $16
|
||||
br $1, do_switch_stack
|
||||
bsr $1, do_switch_stack
|
||||
mov $sp, $17
|
||||
mov $5, $18
|
||||
mov $19, $9 /* save old syscall number */
|
||||
mov $20, $10 /* save old a3 */
|
||||
and $5, _TIF_SIGPENDING, $2
|
||||
cmovne $2, 0, $9 /* we don't want double syscall restarts */
|
||||
jsr $26, do_notify_resume
|
||||
mov $9, $19
|
||||
mov $10, $20
|
||||
bsr $1, undo_switch_stack
|
||||
br restore_all
|
||||
br ret_to_user
|
||||
.end work_pending
|
||||
|
||||
/*
|
||||
|
@ -430,6 +436,7 @@ strace:
|
|||
beq $1, 1f
|
||||
ldq $27, 0($2)
|
||||
1: jsr $26, ($27), sys_gettimeofday
|
||||
ret_from_straced:
|
||||
ldgp $gp, 0($26)
|
||||
|
||||
/* check return.. */
|
||||
|
@ -757,11 +764,15 @@ sys_vfork:
|
|||
.ent sys_sigreturn
|
||||
sys_sigreturn:
|
||||
.prologue 0
|
||||
lda $9, ret_from_straced
|
||||
cmpult $26, $9, $9
|
||||
mov $sp, $17
|
||||
lda $18, -SWITCH_STACK_SIZE($sp)
|
||||
lda $sp, -SWITCH_STACK_SIZE($sp)
|
||||
jsr $26, do_sigreturn
|
||||
br $1, undo_switch_stack
|
||||
bne $9, 1f
|
||||
jsr $26, syscall_trace
|
||||
1: br $1, undo_switch_stack
|
||||
br ret_from_sys_call
|
||||
.end sys_sigreturn
|
||||
|
||||
|
@ -770,46 +781,18 @@ sys_sigreturn:
|
|||
.ent sys_rt_sigreturn
|
||||
sys_rt_sigreturn:
|
||||
.prologue 0
|
||||
lda $9, ret_from_straced
|
||||
cmpult $26, $9, $9
|
||||
mov $sp, $17
|
||||
lda $18, -SWITCH_STACK_SIZE($sp)
|
||||
lda $sp, -SWITCH_STACK_SIZE($sp)
|
||||
jsr $26, do_rt_sigreturn
|
||||
br $1, undo_switch_stack
|
||||
bne $9, 1f
|
||||
jsr $26, syscall_trace
|
||||
1: br $1, undo_switch_stack
|
||||
br ret_from_sys_call
|
||||
.end sys_rt_sigreturn
|
||||
|
||||
.align 4
|
||||
.globl sys_sigsuspend
|
||||
.ent sys_sigsuspend
|
||||
sys_sigsuspend:
|
||||
.prologue 0
|
||||
mov $sp, $17
|
||||
br $1, do_switch_stack
|
||||
mov $sp, $18
|
||||
subq $sp, 16, $sp
|
||||
stq $26, 0($sp)
|
||||
jsr $26, do_sigsuspend
|
||||
ldq $26, 0($sp)
|
||||
lda $sp, SWITCH_STACK_SIZE+16($sp)
|
||||
ret
|
||||
.end sys_sigsuspend
|
||||
|
||||
.align 4
|
||||
.globl sys_rt_sigsuspend
|
||||
.ent sys_rt_sigsuspend
|
||||
sys_rt_sigsuspend:
|
||||
.prologue 0
|
||||
mov $sp, $18
|
||||
br $1, do_switch_stack
|
||||
mov $sp, $19
|
||||
subq $sp, 16, $sp
|
||||
stq $26, 0($sp)
|
||||
jsr $26, do_rt_sigsuspend
|
||||
ldq $26, 0($sp)
|
||||
lda $sp, SWITCH_STACK_SIZE+16($sp)
|
||||
ret
|
||||
.end sys_rt_sigsuspend
|
||||
|
||||
.align 4
|
||||
.globl sys_sethae
|
||||
.ent sys_sethae
|
||||
|
|
|
@ -90,11 +90,13 @@ static int
|
|||
ev6_parse_cbox(u64 c_addr, u64 c1_syn, u64 c2_syn,
|
||||
u64 c_stat, u64 c_sts, int print)
|
||||
{
|
||||
char *sourcename[] = { "UNKNOWN", "UNKNOWN", "UNKNOWN",
|
||||
"MEMORY", "BCACHE", "DCACHE",
|
||||
"BCACHE PROBE", "BCACHE PROBE" };
|
||||
char *streamname[] = { "D", "I" };
|
||||
char *bitsname[] = { "SINGLE", "DOUBLE" };
|
||||
static const char * const sourcename[] = {
|
||||
"UNKNOWN", "UNKNOWN", "UNKNOWN",
|
||||
"MEMORY", "BCACHE", "DCACHE",
|
||||
"BCACHE PROBE", "BCACHE PROBE"
|
||||
};
|
||||
static const char * const streamname[] = { "D", "I" };
|
||||
static const char * const bitsname[] = { "SINGLE", "DOUBLE" };
|
||||
int status = MCHK_DISPOSITION_REPORT;
|
||||
int source = -1, stream = -1, bits = -1;
|
||||
|
||||
|
|
|
@ -109,7 +109,7 @@ marvel_print_err_cyc(u64 err_cyc)
|
|||
#define IO7__ERR_CYC__CYCLE__M (0x7)
|
||||
|
||||
printk("%s Packet In Error: %s\n"
|
||||
"%s Error in %s, cycle %ld%s%s\n",
|
||||
"%s Error in %s, cycle %lld%s%s\n",
|
||||
err_print_prefix,
|
||||
packet_desc[EXTRACT(err_cyc, IO7__ERR_CYC__PACKET)],
|
||||
err_print_prefix,
|
||||
|
@ -313,7 +313,7 @@ marvel_print_po7_ugbge_sym(u64 ugbge_sym)
|
|||
}
|
||||
|
||||
printk("%s Up Hose Garbage Symptom:\n"
|
||||
"%s Source Port: %ld - Dest PID: %ld - OpCode: %s\n",
|
||||
"%s Source Port: %lld - Dest PID: %lld - OpCode: %s\n",
|
||||
err_print_prefix,
|
||||
err_print_prefix,
|
||||
EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_SRC_PORT),
|
||||
|
@ -552,7 +552,7 @@ marvel_print_pox_spl_cmplt(u64 spl_cmplt)
|
|||
#define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__M (0xfff)
|
||||
|
||||
printk("%s Split Completion Error:\n"
|
||||
"%s Source (Bus:Dev:Func): %ld:%ld:%ld\n",
|
||||
"%s Source (Bus:Dev:Func): %lld:%lld:%lld\n",
|
||||
err_print_prefix,
|
||||
err_print_prefix,
|
||||
EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_BUS),
|
||||
|
@ -589,22 +589,23 @@ marvel_print_pox_spl_cmplt(u64 spl_cmplt)
|
|||
static void
|
||||
marvel_print_pox_trans_sum(u64 trans_sum)
|
||||
{
|
||||
char *pcix_cmd[] = { "Interrupt Acknowledge",
|
||||
"Special Cycle",
|
||||
"I/O Read",
|
||||
"I/O Write",
|
||||
"Reserved",
|
||||
"Reserved / Device ID Message",
|
||||
"Memory Read",
|
||||
"Memory Write",
|
||||
"Reserved / Alias to Memory Read Block",
|
||||
"Reserved / Alias to Memory Write Block",
|
||||
"Configuration Read",
|
||||
"Configuration Write",
|
||||
"Memory Read Multiple / Split Completion",
|
||||
"Dual Address Cycle",
|
||||
"Memory Read Line / Memory Read Block",
|
||||
"Memory Write and Invalidate / Memory Write Block"
|
||||
static const char * const pcix_cmd[] = {
|
||||
"Interrupt Acknowledge",
|
||||
"Special Cycle",
|
||||
"I/O Read",
|
||||
"I/O Write",
|
||||
"Reserved",
|
||||
"Reserved / Device ID Message",
|
||||
"Memory Read",
|
||||
"Memory Write",
|
||||
"Reserved / Alias to Memory Read Block",
|
||||
"Reserved / Alias to Memory Write Block",
|
||||
"Configuration Read",
|
||||
"Configuration Write",
|
||||
"Memory Read Multiple / Split Completion",
|
||||
"Dual Address Cycle",
|
||||
"Memory Read Line / Memory Read Block",
|
||||
"Memory Write and Invalidate / Memory Write Block"
|
||||
};
|
||||
|
||||
#define IO7__POX_TRANSUM__PCI_ADDR__S (0)
|
||||
|
|
|
@ -75,8 +75,12 @@ titan_parse_p_serror(int which, u64 serror, int print)
|
|||
int status = MCHK_DISPOSITION_REPORT;
|
||||
|
||||
#ifdef CONFIG_VERBOSE_MCHECK
|
||||
char *serror_src[] = {"GPCI", "APCI", "AGP HP", "AGP LP"};
|
||||
char *serror_cmd[] = {"DMA Read", "DMA RMW", "SGTE Read", "Reserved"};
|
||||
static const char * const serror_src[] = {
|
||||
"GPCI", "APCI", "AGP HP", "AGP LP"
|
||||
};
|
||||
static const char * const serror_cmd[] = {
|
||||
"DMA Read", "DMA RMW", "SGTE Read", "Reserved"
|
||||
};
|
||||
#endif /* CONFIG_VERBOSE_MCHECK */
|
||||
|
||||
#define TITAN__PCHIP_SERROR__LOST_UECC (1UL << 0)
|
||||
|
@ -140,14 +144,15 @@ titan_parse_p_perror(int which, int port, u64 perror, int print)
|
|||
int status = MCHK_DISPOSITION_REPORT;
|
||||
|
||||
#ifdef CONFIG_VERBOSE_MCHECK
|
||||
char *perror_cmd[] = { "Interrupt Acknowledge", "Special Cycle",
|
||||
"I/O Read", "I/O Write",
|
||||
"Reserved", "Reserved",
|
||||
"Memory Read", "Memory Write",
|
||||
"Reserved", "Reserved",
|
||||
"Configuration Read", "Configuration Write",
|
||||
"Memory Read Multiple", "Dual Address Cycle",
|
||||
"Memory Read Line","Memory Write and Invalidate"
|
||||
static const char * const perror_cmd[] = {
|
||||
"Interrupt Acknowledge", "Special Cycle",
|
||||
"I/O Read", "I/O Write",
|
||||
"Reserved", "Reserved",
|
||||
"Memory Read", "Memory Write",
|
||||
"Reserved", "Reserved",
|
||||
"Configuration Read", "Configuration Write",
|
||||
"Memory Read Multiple", "Dual Address Cycle",
|
||||
"Memory Read Line", "Memory Write and Invalidate"
|
||||
};
|
||||
#endif /* CONFIG_VERBOSE_MCHECK */
|
||||
|
||||
|
@ -273,11 +278,11 @@ titan_parse_p_agperror(int which, u64 agperror, int print)
|
|||
int cmd, len;
|
||||
unsigned long addr;
|
||||
|
||||
char *agperror_cmd[] = { "Read (low-priority)", "Read (high-priority)",
|
||||
"Write (low-priority)",
|
||||
"Write (high-priority)",
|
||||
"Reserved", "Reserved",
|
||||
"Flush", "Fence"
|
||||
static const char * const agperror_cmd[] = {
|
||||
"Read (low-priority)", "Read (high-priority)",
|
||||
"Write (low-priority)", "Write (high-priority)",
|
||||
"Reserved", "Reserved",
|
||||
"Flush", "Fence"
|
||||
};
|
||||
#endif /* CONFIG_VERBOSE_MCHECK */
|
||||
|
||||
|
|
|
@ -15,7 +15,6 @@
|
|||
#include <linux/kernel.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/unistd.h>
|
||||
|
@ -69,7 +68,6 @@ SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start,
|
|||
{
|
||||
struct mm_struct *mm;
|
||||
|
||||
lock_kernel();
|
||||
mm = current->mm;
|
||||
mm->end_code = bss_start + bss_len;
|
||||
mm->start_brk = bss_start + bss_len;
|
||||
|
@ -78,7 +76,6 @@ SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start,
|
|||
printk("set_program_attributes(%lx %lx %lx %lx)\n",
|
||||
text_start, text_len, bss_start, bss_len);
|
||||
#endif
|
||||
unlock_kernel();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -517,7 +514,6 @@ SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code,
|
|||
long error;
|
||||
int __user *min_buf_size_ptr;
|
||||
|
||||
lock_kernel();
|
||||
switch (code) {
|
||||
case PL_SET:
|
||||
if (get_user(error, &args->set.nbytes))
|
||||
|
@ -547,7 +543,6 @@ SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code,
|
|||
error = -EOPNOTSUPP;
|
||||
break;
|
||||
};
|
||||
unlock_kernel();
|
||||
return error;
|
||||
}
|
||||
|
||||
|
@ -594,7 +589,7 @@ SYSCALL_DEFINE2(osf_sigstack, struct sigstack __user *, uss,
|
|||
|
||||
SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
|
||||
{
|
||||
char *sysinfo_table[] = {
|
||||
const char *sysinfo_table[] = {
|
||||
utsname()->sysname,
|
||||
utsname()->nodename,
|
||||
utsname()->release,
|
||||
|
@ -606,7 +601,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
|
|||
"dummy", /* secure RPC domain */
|
||||
};
|
||||
unsigned long offset;
|
||||
char *res;
|
||||
const char *res;
|
||||
long len, err = -EINVAL;
|
||||
|
||||
offset = command-1;
|
||||
|
|
|
@ -66,7 +66,7 @@ static int pci_mmap_resource(struct kobject *kobj,
|
|||
{
|
||||
struct pci_dev *pdev = to_pci_dev(container_of(kobj,
|
||||
struct device, kobj));
|
||||
struct resource *res = (struct resource *)attr->private;
|
||||
struct resource *res = attr->private;
|
||||
enum pci_mmap_state mmap_type;
|
||||
struct pci_bus_region bar;
|
||||
int i;
|
||||
|
|
|
@ -241,20 +241,20 @@ static inline unsigned long alpha_read_pmc(int idx)
|
|||
static int alpha_perf_event_set_period(struct perf_event *event,
|
||||
struct hw_perf_event *hwc, int idx)
|
||||
{
|
||||
long left = atomic64_read(&hwc->period_left);
|
||||
long left = local64_read(&hwc->period_left);
|
||||
long period = hwc->sample_period;
|
||||
int ret = 0;
|
||||
|
||||
if (unlikely(left <= -period)) {
|
||||
left = period;
|
||||
atomic64_set(&hwc->period_left, left);
|
||||
local64_set(&hwc->period_left, left);
|
||||
hwc->last_period = period;
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
if (unlikely(left <= 0)) {
|
||||
left += period;
|
||||
atomic64_set(&hwc->period_left, left);
|
||||
local64_set(&hwc->period_left, left);
|
||||
hwc->last_period = period;
|
||||
ret = 1;
|
||||
}
|
||||
|
@ -269,7 +269,7 @@ static int alpha_perf_event_set_period(struct perf_event *event,
|
|||
if (left > (long)alpha_pmu->pmc_max_period[idx])
|
||||
left = alpha_pmu->pmc_max_period[idx];
|
||||
|
||||
atomic64_set(&hwc->prev_count, (unsigned long)(-left));
|
||||
local64_set(&hwc->prev_count, (unsigned long)(-left));
|
||||
|
||||
alpha_write_pmc(idx, (unsigned long)(-left));
|
||||
|
||||
|
@ -300,10 +300,10 @@ static unsigned long alpha_perf_event_update(struct perf_event *event,
|
|||
long delta;
|
||||
|
||||
again:
|
||||
prev_raw_count = atomic64_read(&hwc->prev_count);
|
||||
prev_raw_count = local64_read(&hwc->prev_count);
|
||||
new_raw_count = alpha_read_pmc(idx);
|
||||
|
||||
if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
|
||||
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
|
||||
new_raw_count) != prev_raw_count)
|
||||
goto again;
|
||||
|
||||
|
@ -316,8 +316,8 @@ again:
|
|||
delta += alpha_pmu->pmc_max_period[idx] + 1;
|
||||
}
|
||||
|
||||
atomic64_add(delta, &event->count);
|
||||
atomic64_sub(delta, &hwc->period_left);
|
||||
local64_add(delta, &event->count);
|
||||
local64_sub(delta, &hwc->period_left);
|
||||
|
||||
return new_raw_count;
|
||||
}
|
||||
|
@ -636,7 +636,7 @@ static int __hw_perf_event_init(struct perf_event *event)
|
|||
if (!hwc->sample_period) {
|
||||
hwc->sample_period = alpha_pmu->pmc_max_period[0];
|
||||
hwc->last_period = hwc->sample_period;
|
||||
atomic64_set(&hwc->period_left, hwc->sample_period);
|
||||
local64_set(&hwc->period_left, hwc->sample_period);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -156,9 +156,6 @@ extern void SMC669_Init(int);
|
|||
/* es1888.c */
|
||||
extern void es1888_init(void);
|
||||
|
||||
/* ns87312.c */
|
||||
extern void ns87312_enable_ide(long ide_base);
|
||||
|
||||
/* ../lib/fpreg.c */
|
||||
extern void alpha_write_fp_reg (unsigned long reg, unsigned long val);
|
||||
extern unsigned long alpha_read_fp_reg (unsigned long reg);
|
||||
|
|
|
@ -144,8 +144,7 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
|
|||
/*
|
||||
* Atomically swap in the new signal mask, and wait for a signal.
|
||||
*/
|
||||
asmlinkage int
|
||||
do_sigsuspend(old_sigset_t mask, struct pt_regs *regs, struct switch_stack *sw)
|
||||
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
|
||||
{
|
||||
mask &= _BLOCKABLE;
|
||||
spin_lock_irq(¤t->sighand->siglock);
|
||||
|
@ -154,41 +153,6 @@ do_sigsuspend(old_sigset_t mask, struct pt_regs *regs, struct switch_stack *sw)
|
|||
recalc_sigpending();
|
||||
spin_unlock_irq(¤t->sighand->siglock);
|
||||
|
||||
/* Indicate EINTR on return from any possible signal handler,
|
||||
which will not come back through here, but via sigreturn. */
|
||||
regs->r0 = EINTR;
|
||||
regs->r19 = 1;
|
||||
|
||||
current->state = TASK_INTERRUPTIBLE;
|
||||
schedule();
|
||||
set_thread_flag(TIF_RESTORE_SIGMASK);
|
||||
return -ERESTARTNOHAND;
|
||||
}
|
||||
|
||||
asmlinkage int
|
||||
do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize,
|
||||
struct pt_regs *regs, struct switch_stack *sw)
|
||||
{
|
||||
sigset_t set;
|
||||
|
||||
/* XXX: Don't preclude handling different sized sigset_t's. */
|
||||
if (sigsetsize != sizeof(sigset_t))
|
||||
return -EINVAL;
|
||||
if (copy_from_user(&set, uset, sizeof(set)))
|
||||
return -EFAULT;
|
||||
|
||||
sigdelsetmask(&set, ~_BLOCKABLE);
|
||||
spin_lock_irq(¤t->sighand->siglock);
|
||||
current->saved_sigmask = current->blocked;
|
||||
current->blocked = set;
|
||||
recalc_sigpending();
|
||||
spin_unlock_irq(¤t->sighand->siglock);
|
||||
|
||||
/* Indicate EINTR on return from any possible signal handler,
|
||||
which will not come back through here, but via sigreturn. */
|
||||
regs->r0 = EINTR;
|
||||
regs->r19 = 1;
|
||||
|
||||
current->state = TASK_INTERRUPTIBLE;
|
||||
schedule();
|
||||
set_thread_flag(TIF_RESTORE_SIGMASK);
|
||||
|
@ -239,6 +203,8 @@ restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
|
|||
unsigned long usp;
|
||||
long i, err = __get_user(regs->pc, &sc->sc_pc);
|
||||
|
||||
current_thread_info()->restart_block.fn = do_no_restart_syscall;
|
||||
|
||||
sw->r26 = (unsigned long) ret_from_sys_call;
|
||||
|
||||
err |= __get_user(regs->r0, sc->sc_regs+0);
|
||||
|
@ -591,7 +557,6 @@ syscall_restart(unsigned long r0, unsigned long r19,
|
|||
regs->pc -= 4;
|
||||
break;
|
||||
case ERESTART_RESTARTBLOCK:
|
||||
current_thread_info()->restart_block.fn = do_no_restart_syscall;
|
||||
regs->r0 = EINTR;
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -87,7 +87,7 @@ static int srm_env_proc_show(struct seq_file *m, void *v)
|
|||
srm_env_t *entry;
|
||||
char *page;
|
||||
|
||||
entry = (srm_env_t *)m->private;
|
||||
entry = m->private;
|
||||
page = (char *)__get_free_page(GFP_USER);
|
||||
if (!page)
|
||||
return -ENOMEM;
|
||||
|
|
|
@ -33,7 +33,7 @@
|
|||
#include "irq_impl.h"
|
||||
#include "pci_impl.h"
|
||||
#include "machvec_impl.h"
|
||||
|
||||
#include "pc873xx.h"
|
||||
|
||||
/* Note mask bit is true for DISABLED irqs. */
|
||||
static unsigned long cached_irq_mask = ~0UL;
|
||||
|
@ -235,18 +235,31 @@ cabriolet_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
|
|||
return COMMON_TABLE_LOOKUP;
|
||||
}
|
||||
|
||||
static inline void __init
|
||||
cabriolet_enable_ide(void)
|
||||
{
|
||||
if (pc873xx_probe() == -1) {
|
||||
printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n");
|
||||
} else {
|
||||
printk(KERN_INFO "Found %s Super IO chip at 0x%x\n",
|
||||
pc873xx_get_model(), pc873xx_get_base());
|
||||
|
||||
pc873xx_enable_ide();
|
||||
}
|
||||
}
|
||||
|
||||
static inline void __init
|
||||
cabriolet_init_pci(void)
|
||||
{
|
||||
common_init_pci();
|
||||
ns87312_enable_ide(0x398);
|
||||
cabriolet_enable_ide();
|
||||
}
|
||||
|
||||
static inline void __init
|
||||
cia_cab_init_pci(void)
|
||||
{
|
||||
cia_init_pci();
|
||||
ns87312_enable_ide(0x398);
|
||||
cabriolet_enable_ide();
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -29,7 +29,7 @@
|
|||
#include "irq_impl.h"
|
||||
#include "pci_impl.h"
|
||||
#include "machvec_impl.h"
|
||||
|
||||
#include "pc873xx.h"
|
||||
|
||||
/* Note mask bit is true for DISABLED irqs. */
|
||||
static unsigned long cached_irq_mask[2] = { -1, -1 };
|
||||
|
@ -264,7 +264,14 @@ takara_init_pci(void)
|
|||
alpha_mv.pci_map_irq = takara_map_irq_srm;
|
||||
|
||||
cia_init_pci();
|
||||
ns87312_enable_ide(0x26e);
|
||||
|
||||
if (pc873xx_probe() == -1) {
|
||||
printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n");
|
||||
} else {
|
||||
printk(KERN_INFO "Found %s Super IO chip at 0x%x\n",
|
||||
pc873xx_get_model(), pc873xx_get_base());
|
||||
pc873xx_enable_ide();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -512,6 +512,9 @@ sys_call_table:
|
|||
.quad sys_pwritev
|
||||
.quad sys_rt_tgsigqueueinfo
|
||||
.quad sys_perf_event_open
|
||||
.quad sys_fanotify_init
|
||||
.quad sys_fanotify_mark /* 495 */
|
||||
.quad sys_prlimit64
|
||||
|
||||
.size sys_call_table, . - sys_call_table
|
||||
.type sys_call_table, @object
|
||||
|
|
|
@ -191,16 +191,16 @@ irqreturn_t timer_interrupt(int irq, void *dev)
|
|||
|
||||
write_sequnlock(&xtime_lock);
|
||||
|
||||
#ifndef CONFIG_SMP
|
||||
while (nticks--)
|
||||
update_process_times(user_mode(get_irq_regs()));
|
||||
#endif
|
||||
|
||||
if (test_perf_event_pending()) {
|
||||
clear_perf_event_pending();
|
||||
perf_event_do_pending();
|
||||
}
|
||||
|
||||
#ifndef CONFIG_SMP
|
||||
while (nticks--)
|
||||
update_process_times(user_mode(get_irq_regs()));
|
||||
#endif
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
|
|
|
@ -13,7 +13,6 @@
|
|||
#include <linux/sched.h>
|
||||
#include <linux/tty.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kallsyms.h>
|
||||
|
@ -623,7 +622,6 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
|
|||
return;
|
||||
}
|
||||
|
||||
lock_kernel();
|
||||
printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
|
||||
pc, va, opcode, reg);
|
||||
do_exit(SIGSEGV);
|
||||
|
@ -646,7 +644,6 @@ got_exception:
|
|||
* Yikes! No one to forward the exception to.
|
||||
* Since the registers are in a weird format, dump them ourselves.
|
||||
*/
|
||||
lock_kernel();
|
||||
|
||||
printk("%s(%d): unhandled unaligned exception\n",
|
||||
current->comm, task_pid_nr(current));
|
||||
|
|
|
@ -1576,96 +1576,6 @@ config AUTO_ZRELADDR
|
|||
0xf8000000. This assumes the zImage being placed in the first 128MB
|
||||
from start of memory.
|
||||
|
||||
config ZRELADDR
|
||||
hex "Physical address of the decompressed kernel image"
|
||||
depends on !AUTO_ZRELADDR
|
||||
default 0x00008000 if ARCH_BCMRING ||\
|
||||
ARCH_CNS3XXX ||\
|
||||
ARCH_DOVE ||\
|
||||
ARCH_EBSA110 ||\
|
||||
ARCH_FOOTBRIDGE ||\
|
||||
ARCH_INTEGRATOR ||\
|
||||
ARCH_IOP13XX ||\
|
||||
ARCH_IOP33X ||\
|
||||
ARCH_IXP2000 ||\
|
||||
ARCH_IXP23XX ||\
|
||||
ARCH_IXP4XX ||\
|
||||
ARCH_KIRKWOOD ||\
|
||||
ARCH_KS8695 ||\
|
||||
ARCH_LOKI ||\
|
||||
ARCH_MMP ||\
|
||||
ARCH_MV78XX0 ||\
|
||||
ARCH_NOMADIK ||\
|
||||
ARCH_NUC93X ||\
|
||||
ARCH_NS9XXX ||\
|
||||
ARCH_ORION5X ||\
|
||||
ARCH_SPEAR3XX ||\
|
||||
ARCH_SPEAR6XX ||\
|
||||
ARCH_U8500 ||\
|
||||
ARCH_VERSATILE ||\
|
||||
ARCH_W90X900
|
||||
default 0x08008000 if ARCH_MX1 ||\
|
||||
ARCH_SHARK
|
||||
default 0x10008000 if ARCH_MSM ||\
|
||||
ARCH_OMAP1 ||\
|
||||
ARCH_RPC
|
||||
default 0x20008000 if ARCH_S5P6440 ||\
|
||||
ARCH_S5P6442 ||\
|
||||
ARCH_S5PC100 ||\
|
||||
ARCH_S5PV210
|
||||
default 0x30008000 if ARCH_S3C2410 ||\
|
||||
ARCH_S3C2400 ||\
|
||||
ARCH_S3C2412 ||\
|
||||
ARCH_S3C2416 ||\
|
||||
ARCH_S3C2440 ||\
|
||||
ARCH_S3C2443
|
||||
default 0x40008000 if ARCH_STMP378X ||\
|
||||
ARCH_STMP37XX ||\
|
||||
ARCH_SH7372 ||\
|
||||
ARCH_SH7377 ||\
|
||||
ARCH_S5PV310
|
||||
default 0x50008000 if ARCH_S3C64XX ||\
|
||||
ARCH_SH7367
|
||||
default 0x60008000 if ARCH_VEXPRESS
|
||||
default 0x80008000 if ARCH_MX25 ||\
|
||||
ARCH_MX3 ||\
|
||||
ARCH_NETX ||\
|
||||
ARCH_OMAP2PLUS ||\
|
||||
ARCH_PNX4008
|
||||
default 0x90008000 if ARCH_MX5 ||\
|
||||
ARCH_MX91231
|
||||
default 0xa0008000 if ARCH_IOP32X ||\
|
||||
ARCH_PXA ||\
|
||||
MACH_MX27
|
||||
default 0xc0008000 if ARCH_LH7A40X ||\
|
||||
MACH_MX21
|
||||
default 0xf0008000 if ARCH_AAEC2000 ||\
|
||||
ARCH_L7200
|
||||
default 0xc0028000 if ARCH_CLPS711X
|
||||
default 0x70008000 if ARCH_AT91 && (ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
|
||||
default 0x20008000 if ARCH_AT91 && !(ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
|
||||
default 0xc0008000 if ARCH_DAVINCI && ARCH_DAVINCI_DA8XX
|
||||
default 0x80008000 if ARCH_DAVINCI && !ARCH_DAVINCI_DA8XX
|
||||
default 0x00008000 if ARCH_EP93XX && EP93XX_SDCE3_SYNC_PHYS_OFFSET
|
||||
default 0xc0008000 if ARCH_EP93XX && EP93XX_SDCE0_PHYS_OFFSET
|
||||
default 0xd0008000 if ARCH_EP93XX && EP93XX_SDCE1_PHYS_OFFSET
|
||||
default 0xe0008000 if ARCH_EP93XX && EP93XX_SDCE2_PHYS_OFFSET
|
||||
default 0xf0008000 if ARCH_EP93XX && EP93XX_SDCE3_ASYNC_PHYS_OFFSET
|
||||
default 0x00008000 if ARCH_GEMINI && GEMINI_MEM_SWAP
|
||||
default 0x10008000 if ARCH_GEMINI && !GEMINI_MEM_SWAP
|
||||
default 0x70008000 if ARCH_REALVIEW && REALVIEW_HIGH_PHYS_OFFSET
|
||||
default 0x00008000 if ARCH_REALVIEW && !REALVIEW_HIGH_PHYS_OFFSET
|
||||
default 0xc0208000 if ARCH_SA1100 && SA1111
|
||||
default 0xc0008000 if ARCH_SA1100 && !SA1111
|
||||
default 0x30108000 if ARCH_S3C2410 && PM_H1940
|
||||
default 0x28E08000 if ARCH_U300 && MACH_U300_SINGLE_RAM
|
||||
default 0x48008000 if ARCH_U300 && !MACH_U300_SINGLE_RAM
|
||||
help
|
||||
ZRELADDR is the physical address where the decompressed kernel
|
||||
image will be placed. ZRELADDR has to be specified when the
|
||||
assumption of AUTO_ZRELADDR is not valid, or when ZBOOT_ROM is
|
||||
selected.
|
||||
|
||||
endmenu
|
||||
|
||||
menu "CPU Power Management"
|
||||
|
|
|
@ -14,16 +14,18 @@
|
|||
MKIMAGE := $(srctree)/scripts/mkuboot.sh
|
||||
|
||||
ifneq ($(MACHINE),)
|
||||
-include $(srctree)/$(MACHINE)/Makefile.boot
|
||||
include $(srctree)/$(MACHINE)/Makefile.boot
|
||||
endif
|
||||
|
||||
# Note: the following conditions must always be true:
|
||||
# ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET)
|
||||
# PARAMS_PHYS must be within 4MB of ZRELADDR
|
||||
# INITRD_PHYS must be in RAM
|
||||
ZRELADDR := $(zreladdr-y)
|
||||
PARAMS_PHYS := $(params_phys-y)
|
||||
INITRD_PHYS := $(initrd_phys-y)
|
||||
|
||||
export INITRD_PHYS PARAMS_PHYS
|
||||
export ZRELADDR INITRD_PHYS PARAMS_PHYS
|
||||
|
||||
targets := Image zImage xipImage bootpImage uImage
|
||||
|
||||
|
@ -65,7 +67,7 @@ quiet_cmd_uimage = UIMAGE $@
|
|||
ifeq ($(CONFIG_ZBOOT_ROM),y)
|
||||
$(obj)/uImage: LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT)
|
||||
else
|
||||
$(obj)/uImage: LOADADDR=$(CONFIG_ZRELADDR)
|
||||
$(obj)/uImage: LOADADDR=$(ZRELADDR)
|
||||
endif
|
||||
|
||||
ifeq ($(CONFIG_THUMB2_KERNEL),y)
|
||||
|
|
|
@ -79,6 +79,10 @@ endif
|
|||
EXTRA_CFLAGS := -fpic -fno-builtin
|
||||
EXTRA_AFLAGS := -Wa,-march=all
|
||||
|
||||
# Supply ZRELADDR to the decompressor via a linker symbol.
|
||||
ifneq ($(CONFIG_AUTO_ZRELADDR),y)
|
||||
LDFLAGS_vmlinux := --defsym zreladdr=$(ZRELADDR)
|
||||
endif
|
||||
ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
|
||||
LDFLAGS_vmlinux += --be8
|
||||
endif
|
||||
|
|
|
@ -177,7 +177,7 @@ not_angel:
|
|||
and r4, pc, #0xf8000000
|
||||
add r4, r4, #TEXT_OFFSET
|
||||
#else
|
||||
ldr r4, =CONFIG_ZRELADDR
|
||||
ldr r4, =zreladdr
|
||||
#endif
|
||||
subs r0, r0, r1 @ calculate the delta offset
|
||||
|
||||
|
|
|
@ -263,6 +263,14 @@ static int it8152_pci_platform_notify_remove(struct device *dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
|
||||
{
|
||||
dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
|
||||
__func__, dma_addr, size);
|
||||
return (dev->bus == &pci_bus_type) &&
|
||||
((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
|
||||
}
|
||||
|
||||
int __init it8152_pci_setup(int nr, struct pci_sys_data *sys)
|
||||
{
|
||||
it8152_io.start = IT8152_IO_BASE + 0x12000;
|
||||
|
|
|
@ -288,15 +288,7 @@ extern void dmabounce_unregister_dev(struct device *);
|
|||
* DMA access and 1 if the buffer needs to be bounced.
|
||||
*
|
||||
*/
|
||||
#ifdef CONFIG_SA1111
|
||||
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
|
||||
#else
|
||||
static inline int dma_needs_bounce(struct device *dev, dma_addr_t addr,
|
||||
size_t size)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The DMA API, implemented by dmabounce.c. See below for descriptions.
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
* counter interrupts are regular interrupts and not an NMI. This
|
||||
* means that when we receive the interrupt we can call
|
||||
* perf_event_do_pending() that handles all of the work with
|
||||
* interrupts enabled.
|
||||
* interrupts disabled.
|
||||
*/
|
||||
static inline void
|
||||
set_perf_event_pending(void)
|
||||
|
|
|
@ -393,6 +393,9 @@
|
|||
#define __NR_perf_event_open (__NR_SYSCALL_BASE+364)
|
||||
#define __NR_recvmmsg (__NR_SYSCALL_BASE+365)
|
||||
#define __NR_accept4 (__NR_SYSCALL_BASE+366)
|
||||
#define __NR_fanotify_init (__NR_SYSCALL_BASE+367)
|
||||
#define __NR_fanotify_mark (__NR_SYSCALL_BASE+368)
|
||||
#define __NR_prlimit64 (__NR_SYSCALL_BASE+369)
|
||||
|
||||
/*
|
||||
* The following SWIs are ARM private.
|
||||
|
|
|
@ -376,6 +376,9 @@
|
|||
CALL(sys_perf_event_open)
|
||||
/* 365 */ CALL(sys_recvmmsg)
|
||||
CALL(sys_accept4)
|
||||
CALL(sys_fanotify_init)
|
||||
CALL(sys_fanotify_mark)
|
||||
CALL(sys_prlimit64)
|
||||
#ifndef syscalls_counted
|
||||
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
|
||||
#define syscalls_counted
|
||||
|
|
|
@ -418,11 +418,13 @@ ENDPROC(sys_clone_wrapper)
|
|||
|
||||
sys_sigreturn_wrapper:
|
||||
add r0, sp, #S_OFF
|
||||
mov why, #0 @ prevent syscall restart handling
|
||||
b sys_sigreturn
|
||||
ENDPROC(sys_sigreturn_wrapper)
|
||||
|
||||
sys_rt_sigreturn_wrapper:
|
||||
add r0, sp, #S_OFF
|
||||
mov why, #0 @ prevent syscall restart handling
|
||||
b sys_rt_sigreturn
|
||||
ENDPROC(sys_rt_sigreturn_wrapper)
|
||||
|
||||
|
|
|
@ -319,8 +319,8 @@ validate_event(struct cpu_hw_events *cpuc,
|
|||
{
|
||||
struct hw_perf_event fake_event = event->hw;
|
||||
|
||||
if (event->pmu && event->pmu != &pmu)
|
||||
return 0;
|
||||
if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
|
||||
return 1;
|
||||
|
||||
return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
|
||||
}
|
||||
|
@ -1041,8 +1041,8 @@ armv6pmu_handle_irq(int irq_num,
|
|||
/*
|
||||
* Handle the pending perf events.
|
||||
*
|
||||
* Note: this call *must* be run with interrupts enabled. For
|
||||
* platforms that can have the PMU interrupts raised as a PMI, this
|
||||
* Note: this call *must* be run with interrupts disabled. For
|
||||
* platforms that can have the PMU interrupts raised as an NMI, this
|
||||
* will not work.
|
||||
*/
|
||||
perf_event_do_pending();
|
||||
|
@ -2017,8 +2017,8 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
|
|||
/*
|
||||
* Handle the pending perf events.
|
||||
*
|
||||
* Note: this call *must* be run with interrupts enabled. For
|
||||
* platforms that can have the PMU interrupts raised as a PMI, this
|
||||
* Note: this call *must* be run with interrupts disabled. For
|
||||
* platforms that can have the PMU interrupts raised as an NMI, this
|
||||
* will not work.
|
||||
*/
|
||||
perf_event_do_pending();
|
||||
|
|
|
@ -121,8 +121,8 @@ static struct clk ssc1_clk = {
|
|||
.pmc_mask = 1 << AT91SAM9G45_ID_SSC1,
|
||||
.type = CLK_TYPE_PERIPHERAL,
|
||||
};
|
||||
static struct clk tcb_clk = {
|
||||
.name = "tcb_clk",
|
||||
static struct clk tcb0_clk = {
|
||||
.name = "tcb0_clk",
|
||||
.pmc_mask = 1 << AT91SAM9G45_ID_TCB,
|
||||
.type = CLK_TYPE_PERIPHERAL,
|
||||
};
|
||||
|
@ -192,6 +192,14 @@ static struct clk ohci_clk = {
|
|||
.parent = &uhphs_clk,
|
||||
};
|
||||
|
||||
/* One additional fake clock for second TC block */
|
||||
static struct clk tcb1_clk = {
|
||||
.name = "tcb1_clk",
|
||||
.pmc_mask = 0,
|
||||
.type = CLK_TYPE_PERIPHERAL,
|
||||
.parent = &tcb0_clk,
|
||||
};
|
||||
|
||||
static struct clk *periph_clocks[] __initdata = {
|
||||
&pioA_clk,
|
||||
&pioB_clk,
|
||||
|
@ -208,7 +216,7 @@ static struct clk *periph_clocks[] __initdata = {
|
|||
&spi1_clk,
|
||||
&ssc0_clk,
|
||||
&ssc1_clk,
|
||||
&tcb_clk,
|
||||
&tcb0_clk,
|
||||
&pwm_clk,
|
||||
&tsc_clk,
|
||||
&dma_clk,
|
||||
|
@ -221,6 +229,7 @@ static struct clk *periph_clocks[] __initdata = {
|
|||
&mmc1_clk,
|
||||
// irq0
|
||||
&ohci_clk,
|
||||
&tcb1_clk,
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@ -46,7 +46,7 @@ static struct resource hdmac_resources[] = {
|
|||
.end = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1,
|
||||
.flags = IORESOURCE_MEM,
|
||||
},
|
||||
[2] = {
|
||||
[1] = {
|
||||
.start = AT91SAM9G45_ID_DMA,
|
||||
.end = AT91SAM9G45_ID_DMA,
|
||||
.flags = IORESOURCE_IRQ,
|
||||
|
@ -835,9 +835,9 @@ static struct platform_device at91sam9g45_tcb1_device = {
|
|||
static void __init at91_add_device_tc(void)
|
||||
{
|
||||
/* this chip has one clock and irq for all six TC channels */
|
||||
at91_clock_associate("tcb_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
|
||||
at91_clock_associate("tcb0_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
|
||||
platform_device_register(&at91sam9g45_tcb0_device);
|
||||
at91_clock_associate("tcb_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
|
||||
at91_clock_associate("tcb1_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
|
||||
platform_device_register(&at91sam9g45_tcb1_device);
|
||||
}
|
||||
#else
|
||||
|
|
|
@ -93,11 +93,12 @@ static struct resource dm9000_resource[] = {
|
|||
.start = AT91_PIN_PC11,
|
||||
.end = AT91_PIN_PC11,
|
||||
.flags = IORESOURCE_IRQ
|
||||
| IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE,
|
||||
}
|
||||
};
|
||||
|
||||
static struct dm9000_plat_data dm9000_platdata = {
|
||||
.flags = DM9000_PLATF_16BITONLY,
|
||||
.flags = DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM,
|
||||
};
|
||||
|
||||
static struct platform_device dm9000_device = {
|
||||
|
@ -167,17 +168,6 @@ static struct at91_udc_data __initdata ek_udc_data = {
|
|||
};
|
||||
|
||||
|
||||
/*
|
||||
* MCI (SD/MMC)
|
||||
*/
|
||||
static struct at91_mmc_data __initdata ek_mmc_data = {
|
||||
.wire4 = 1,
|
||||
// .det_pin = ... not connected
|
||||
// .wp_pin = ... not connected
|
||||
// .vcc_pin = ... not connected
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* NAND flash
|
||||
*/
|
||||
|
@ -246,6 +236,10 @@ static void __init ek_add_device_nand(void)
|
|||
at91_add_device_nand(&ek_nand_data);
|
||||
}
|
||||
|
||||
/*
|
||||
* SPI related devices
|
||||
*/
|
||||
#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE)
|
||||
|
||||
/*
|
||||
* ADS7846 Touchscreen
|
||||
|
@ -356,6 +350,19 @@ static struct spi_board_info ek_spi_devices[] = {
|
|||
#endif
|
||||
};
|
||||
|
||||
#else /* CONFIG_SPI_ATMEL_* */
|
||||
/* spi0 and mmc/sd share the same PIO pins: cannot be used at the same time */
|
||||
|
||||
/*
|
||||
* MCI (SD/MMC)
|
||||
* det_pin, wp_pin and vcc_pin are not connected
|
||||
*/
|
||||
static struct at91_mmc_data __initdata ek_mmc_data = {
|
||||
.wire4 = 1,
|
||||
};
|
||||
|
||||
#endif /* CONFIG_SPI_ATMEL_* */
|
||||
|
||||
|
||||
/*
|
||||
* LCD Controller
|
||||
|
|
|
@ -501,7 +501,8 @@ postcore_initcall(at91_clk_debugfs_init);
|
|||
int __init clk_register(struct clk *clk)
|
||||
{
|
||||
if (clk_is_peripheral(clk)) {
|
||||
clk->parent = &mck;
|
||||
if (!clk->parent)
|
||||
clk->parent = &mck;
|
||||
clk->mode = pmc_periph_mode;
|
||||
list_add_tail(&clk->node, &clocks);
|
||||
}
|
||||
|
|
|
@ -560,4 +560,4 @@ static int __init ep93xx_clock_init(void)
|
|||
clkdev_add_table(clocks, ARRAY_SIZE(clocks));
|
||||
return 0;
|
||||
}
|
||||
arch_initcall(ep93xx_clock_init);
|
||||
postcore_initcall(ep93xx_clock_init);
|
||||
|
|
|
@ -215,7 +215,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = {
|
|||
* Add platform devices present on this baseboard and init
|
||||
* them from CPU side as far as required to use them later on
|
||||
*/
|
||||
void __init eukrea_mbimxsd_baseboard_init(void)
|
||||
void __init eukrea_mbimxsd25_baseboard_init(void)
|
||||
{
|
||||
if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
|
||||
ARRAY_SIZE(eukrea_mbimxsd_pads)))
|
||||
|
|
|
@ -147,8 +147,8 @@ static void __init eukrea_cpuimx25_init(void)
|
|||
if (!otg_mode_host)
|
||||
mxc_register_device(&otg_udc_device, &otg_device_pdata);
|
||||
|
||||
#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD
|
||||
eukrea_mbimxsd_baseboard_init();
|
||||
#ifdef CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD
|
||||
eukrea_mbimxsd25_baseboard_init();
|
||||
#endif
|
||||
}
|
||||
|
||||
|
|
|
@ -155,7 +155,7 @@ static unsigned long get_rate_arm(void)
|
|||
|
||||
aad = &clk_consumer[(pdr0 >> 16) & 0xf];
|
||||
if (aad->sel)
|
||||
fref = fref * 2 / 3;
|
||||
fref = fref * 3 / 4;
|
||||
|
||||
return fref / aad->arm;
|
||||
}
|
||||
|
@ -164,7 +164,7 @@ static unsigned long get_rate_ahb(struct clk *clk)
|
|||
{
|
||||
unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
|
||||
struct arm_ahb_div *aad;
|
||||
unsigned long fref = get_rate_mpll();
|
||||
unsigned long fref = get_rate_arm();
|
||||
|
||||
aad = &clk_consumer[(pdr0 >> 16) & 0xf];
|
||||
|
||||
|
@ -176,16 +176,11 @@ static unsigned long get_rate_ipg(struct clk *clk)
|
|||
return get_rate_ahb(NULL) >> 1;
|
||||
}
|
||||
|
||||
static unsigned long get_3_3_div(unsigned long in)
|
||||
{
|
||||
return (((in >> 3) & 0x7) + 1) * ((in & 0x7) + 1);
|
||||
}
|
||||
|
||||
static unsigned long get_rate_uart(struct clk *clk)
|
||||
{
|
||||
unsigned long pdr3 = __raw_readl(CCM_BASE + CCM_PDR3);
|
||||
unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
|
||||
unsigned long div = get_3_3_div(pdr4 >> 10);
|
||||
unsigned long div = ((pdr4 >> 10) & 0x3f) + 1;
|
||||
|
||||
if (pdr3 & (1 << 14))
|
||||
return get_rate_arm() / div;
|
||||
|
@ -216,7 +211,7 @@ static unsigned long get_rate_sdhc(struct clk *clk)
|
|||
break;
|
||||
}
|
||||
|
||||
return rate / get_3_3_div(div);
|
||||
return rate / (div + 1);
|
||||
}
|
||||
|
||||
static unsigned long get_rate_mshc(struct clk *clk)
|
||||
|
@ -270,7 +265,7 @@ static unsigned long get_rate_csi(struct clk *clk)
|
|||
else
|
||||
rate = get_rate_ppll();
|
||||
|
||||
return rate / get_3_3_div((pdr2 >> 16) & 0x3f);
|
||||
return rate / (((pdr2 >> 16) & 0x3f) + 1);
|
||||
}
|
||||
|
||||
static unsigned long get_rate_otg(struct clk *clk)
|
||||
|
@ -283,25 +278,51 @@ static unsigned long get_rate_otg(struct clk *clk)
|
|||
else
|
||||
rate = get_rate_ppll();
|
||||
|
||||
return rate / get_3_3_div((pdr4 >> 22) & 0x3f);
|
||||
return rate / (((pdr4 >> 22) & 0x3f) + 1);
|
||||
}
|
||||
|
||||
static unsigned long get_rate_ipg_per(struct clk *clk)
|
||||
{
|
||||
unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
|
||||
unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
|
||||
unsigned long div1, div2;
|
||||
unsigned long div;
|
||||
|
||||
if (pdr0 & (1 << 26)) {
|
||||
div1 = (pdr4 >> 19) & 0x7;
|
||||
div2 = (pdr4 >> 16) & 0x7;
|
||||
return get_rate_arm() / ((div1 + 1) * (div2 + 1));
|
||||
div = (pdr4 >> 16) & 0x3f;
|
||||
return get_rate_arm() / (div + 1);
|
||||
} else {
|
||||
div1 = (pdr0 >> 12) & 0x7;
|
||||
return get_rate_ahb(NULL) / div1;
|
||||
div = (pdr0 >> 12) & 0x7;
|
||||
return get_rate_ahb(NULL) / (div + 1);
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned long get_rate_hsp(struct clk *clk)
|
||||
{
|
||||
unsigned long hsp_podf = (__raw_readl(CCM_BASE + CCM_PDR0) >> 20) & 0x03;
|
||||
unsigned long fref = get_rate_mpll();
|
||||
|
||||
if (fref > 400 * 1000 * 1000) {
|
||||
switch (hsp_podf) {
|
||||
case 0:
|
||||
return fref >> 2;
|
||||
case 1:
|
||||
return fref >> 3;
|
||||
case 2:
|
||||
return fref / 3;
|
||||
}
|
||||
} else {
|
||||
switch (hsp_podf) {
|
||||
case 0:
|
||||
case 2:
|
||||
return fref / 3;
|
||||
case 1:
|
||||
return fref / 6;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int clk_cgr_enable(struct clk *clk)
|
||||
{
|
||||
u32 reg;
|
||||
|
@ -359,7 +380,7 @@ DEFINE_CLOCK(i2c1_clk, 0, CCM_CGR1, 10, get_rate_ipg_per, NULL);
|
|||
DEFINE_CLOCK(i2c2_clk, 1, CCM_CGR1, 12, get_rate_ipg_per, NULL);
|
||||
DEFINE_CLOCK(i2c3_clk, 2, CCM_CGR1, 14, get_rate_ipg_per, NULL);
|
||||
DEFINE_CLOCK(iomuxc_clk, 0, CCM_CGR1, 16, NULL, NULL);
|
||||
DEFINE_CLOCK(ipu_clk, 0, CCM_CGR1, 18, get_rate_ahb, NULL);
|
||||
DEFINE_CLOCK(ipu_clk, 0, CCM_CGR1, 18, get_rate_hsp, NULL);
|
||||
DEFINE_CLOCK(kpp_clk, 0, CCM_CGR1, 20, get_rate_ipg, NULL);
|
||||
DEFINE_CLOCK(mlb_clk, 0, CCM_CGR1, 22, get_rate_ahb, NULL);
|
||||
DEFINE_CLOCK(mshc_clk, 0, CCM_CGR1, 24, get_rate_mshc, NULL);
|
||||
|
@ -485,10 +506,10 @@ static struct clk_lookup lookups[] = {
|
|||
|
||||
int __init mx35_clocks_init()
|
||||
{
|
||||
unsigned int ll = 0;
|
||||
unsigned int cgr2 = 3 << 26, cgr3 = 0;
|
||||
|
||||
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
|
||||
ll = (3 << 16);
|
||||
cgr2 |= 3 << 16;
|
||||
#endif
|
||||
|
||||
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
|
||||
|
@ -499,8 +520,20 @@ int __init mx35_clocks_init()
|
|||
__raw_writel((3 << 18), CCM_BASE + CCM_CGR0);
|
||||
__raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
|
||||
CCM_BASE + CCM_CGR1);
|
||||
__raw_writel((3 << 26) | ll, CCM_BASE + CCM_CGR2);
|
||||
__raw_writel(0, CCM_BASE + CCM_CGR3);
|
||||
|
||||
/*
|
||||
* Check if we came up in internal boot mode. If yes, we need some
|
||||
* extra clocks turned on, otherwise the MX35 boot ROM code will
|
||||
* hang after a watchdog reset.
|
||||
*/
|
||||
if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) {
|
||||
/* Additionally turn on UART1, SCC, and IIM clocks */
|
||||
cgr2 |= 3 << 16 | 3 << 4;
|
||||
cgr3 |= 3 << 2;
|
||||
}
|
||||
|
||||
__raw_writel(cgr2, CCM_BASE + CCM_CGR2);
|
||||
__raw_writel(cgr3, CCM_BASE + CCM_CGR3);
|
||||
|
||||
mxc_timer_init(&gpt_clk,
|
||||
MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT);
|
||||
|
|
|
@ -216,7 +216,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = {
|
|||
* Add platform devices present on this baseboard and init
|
||||
* them from CPU side as far as required to use them later on
|
||||
*/
|
||||
void __init eukrea_mbimxsd_baseboard_init(void)
|
||||
void __init eukrea_mbimxsd35_baseboard_init(void)
|
||||
{
|
||||
if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
|
||||
ARRAY_SIZE(eukrea_mbimxsd_pads)))
|
||||
|
|
|
@ -201,8 +201,8 @@ static void __init mxc_board_init(void)
|
|||
if (!otg_mode_host)
|
||||
mxc_register_device(&mxc_otg_udc_device, &otg_device_pdata);
|
||||
|
||||
#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD
|
||||
eukrea_mbimxsd_baseboard_init();
|
||||
#ifdef CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD
|
||||
eukrea_mbimxsd35_baseboard_init();
|
||||
#endif
|
||||
}
|
||||
|
||||
|
|
|
@ -56,7 +56,7 @@ static void _clk_ccgr_disable(struct clk *clk)
|
|||
{
|
||||
u32 reg;
|
||||
reg = __raw_readl(clk->enable_reg);
|
||||
reg &= ~(MXC_CCM_CCGRx_MOD_OFF << clk->enable_shift);
|
||||
reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
|
||||
__raw_writel(reg, clk->enable_reg);
|
||||
|
||||
}
|
||||
|
|
|
@ -398,7 +398,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static __init int pxa_cpufreq_init(struct cpufreq_policy *policy)
|
||||
static int pxa_cpufreq_init(struct cpufreq_policy *policy)
|
||||
{
|
||||
int i;
|
||||
unsigned int freq;
|
||||
|
|
|
@ -204,7 +204,7 @@ static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static __init int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
|
||||
static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
|
||||
{
|
||||
int ret = -EINVAL;
|
||||
|
||||
|
|
|
@ -71,10 +71,10 @@
|
|||
#define GPIO46_CI_DD_7 MFP_CFG_DRV(GPIO46, AF0, DS04X)
|
||||
#define GPIO47_CI_DD_8 MFP_CFG_DRV(GPIO47, AF1, DS04X)
|
||||
#define GPIO48_CI_DD_9 MFP_CFG_DRV(GPIO48, AF1, DS04X)
|
||||
#define GPIO52_CI_HSYNC MFP_CFG_DRV(GPIO52, AF0, DS04X)
|
||||
#define GPIO51_CI_VSYNC MFP_CFG_DRV(GPIO51, AF0, DS04X)
|
||||
#define GPIO49_CI_MCLK MFP_CFG_DRV(GPIO49, AF0, DS04X)
|
||||
#define GPIO50_CI_PCLK MFP_CFG_DRV(GPIO50, AF0, DS04X)
|
||||
#define GPIO51_CI_HSYNC MFP_CFG_DRV(GPIO51, AF0, DS04X)
|
||||
#define GPIO52_CI_VSYNC MFP_CFG_DRV(GPIO52, AF0, DS04X)
|
||||
|
||||
/* KEYPAD */
|
||||
#define GPIO3_KP_DKIN_6 MFP_CFG_LPM(GPIO3, AF2, FLOAT)
|
||||
|
|
|
@ -18,10 +18,11 @@
|
|||
#include <mach/map.h>
|
||||
#include <mach/gpio-bank-c.h>
|
||||
#include <mach/spi-clocks.h>
|
||||
#include <mach/irqs.h>
|
||||
|
||||
#include <plat/s3c64xx-spi.h>
|
||||
#include <plat/gpio-cfg.h>
|
||||
#include <plat/irqs.h>
|
||||
#include <plat/devs.h>
|
||||
|
||||
static char *spi_src_clks[] = {
|
||||
[S3C64XX_SPI_SRCCLK_PCLK] = "pclk",
|
||||
|
|
|
@ -30,73 +30,73 @@
|
|||
#include <plat/devs.h>
|
||||
#include <plat/regs-serial.h>
|
||||
|
||||
#define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK
|
||||
#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
|
||||
#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
|
||||
#define UCON (S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK)
|
||||
#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
|
||||
#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
|
||||
|
||||
static struct s3c2410_uartcfg real6410_uartcfgs[] __initdata = {
|
||||
[0] = {
|
||||
.hwport = 0,
|
||||
.flags = 0,
|
||||
.ucon = UCON,
|
||||
.ulcon = ULCON,
|
||||
.ufcon = UFCON,
|
||||
.hwport = 0,
|
||||
.flags = 0,
|
||||
.ucon = UCON,
|
||||
.ulcon = ULCON,
|
||||
.ufcon = UFCON,
|
||||
},
|
||||
[1] = {
|
||||
.hwport = 1,
|
||||
.flags = 0,
|
||||
.ucon = UCON,
|
||||
.ulcon = ULCON,
|
||||
.ufcon = UFCON,
|
||||
.hwport = 1,
|
||||
.flags = 0,
|
||||
.ucon = UCON,
|
||||
.ulcon = ULCON,
|
||||
.ufcon = UFCON,
|
||||
},
|
||||
[2] = {
|
||||
.hwport = 2,
|
||||
.flags = 0,
|
||||
.ucon = UCON,
|
||||
.ulcon = ULCON,
|
||||
.ufcon = UFCON,
|
||||
.hwport = 2,
|
||||
.flags = 0,
|
||||
.ucon = UCON,
|
||||
.ulcon = ULCON,
|
||||
.ufcon = UFCON,
|
||||
},
|
||||
[3] = {
|
||||
.hwport = 3,
|
||||
.flags = 0,
|
||||
.ucon = UCON,
|
||||
.ulcon = ULCON,
|
||||
.ufcon = UFCON,
|
||||
.hwport = 3,
|
||||
.flags = 0,
|
||||
.ucon = UCON,
|
||||
.ulcon = ULCON,
|
||||
.ufcon = UFCON,
|
||||
},
|
||||
};
|
||||
|
||||
/* DM9000AEP 10/100 ethernet controller */
|
||||
|
||||
static struct resource real6410_dm9k_resource[] = {
|
||||
[0] = {
|
||||
.start = S3C64XX_PA_XM0CSN1,
|
||||
.end = S3C64XX_PA_XM0CSN1 + 1,
|
||||
.flags = IORESOURCE_MEM
|
||||
},
|
||||
[1] = {
|
||||
.start = S3C64XX_PA_XM0CSN1 + 4,
|
||||
.end = S3C64XX_PA_XM0CSN1 + 5,
|
||||
.flags = IORESOURCE_MEM
|
||||
},
|
||||
[2] = {
|
||||
.start = S3C_EINT(7),
|
||||
.end = S3C_EINT(7),
|
||||
.flags = IORESOURCE_IRQ,
|
||||
}
|
||||
[0] = {
|
||||
.start = S3C64XX_PA_XM0CSN1,
|
||||
.end = S3C64XX_PA_XM0CSN1 + 1,
|
||||
.flags = IORESOURCE_MEM
|
||||
},
|
||||
[1] = {
|
||||
.start = S3C64XX_PA_XM0CSN1 + 4,
|
||||
.end = S3C64XX_PA_XM0CSN1 + 5,
|
||||
.flags = IORESOURCE_MEM
|
||||
},
|
||||
[2] = {
|
||||
.start = S3C_EINT(7),
|
||||
.end = S3C_EINT(7),
|
||||
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL
|
||||
}
|
||||
};
|
||||
|
||||
static struct dm9000_plat_data real6410_dm9k_pdata = {
|
||||
.flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
|
||||
.flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
|
||||
};
|
||||
|
||||
static struct platform_device real6410_device_eth = {
|
||||
.name = "dm9000",
|
||||
.id = -1,
|
||||
.num_resources = ARRAY_SIZE(real6410_dm9k_resource),
|
||||
.resource = real6410_dm9k_resource,
|
||||
.dev = {
|
||||
.platform_data = &real6410_dm9k_pdata,
|
||||
},
|
||||
.name = "dm9000",
|
||||
.id = -1,
|
||||
.num_resources = ARRAY_SIZE(real6410_dm9k_resource),
|
||||
.resource = real6410_dm9k_resource,
|
||||
.dev = {
|
||||
.platform_data = &real6410_dm9k_pdata,
|
||||
},
|
||||
};
|
||||
|
||||
static struct platform_device *real6410_devices[] __initdata = {
|
||||
|
@ -129,12 +129,12 @@ static void __init real6410_machine_init(void)
|
|||
/* set timing for nCS1 suitable for ethernet chip */
|
||||
|
||||
__raw_writel((0 << S3C64XX_SROM_BCX__PMC__SHIFT) |
|
||||
(6 << S3C64XX_SROM_BCX__TACP__SHIFT) |
|
||||
(4 << S3C64XX_SROM_BCX__TCAH__SHIFT) |
|
||||
(1 << S3C64XX_SROM_BCX__TCOH__SHIFT) |
|
||||
(13 << S3C64XX_SROM_BCX__TACC__SHIFT) |
|
||||
(4 << S3C64XX_SROM_BCX__TCOS__SHIFT) |
|
||||
(0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1);
|
||||
(6 << S3C64XX_SROM_BCX__TACP__SHIFT) |
|
||||
(4 << S3C64XX_SROM_BCX__TCAH__SHIFT) |
|
||||
(1 << S3C64XX_SROM_BCX__TCOH__SHIFT) |
|
||||
(13 << S3C64XX_SROM_BCX__TACC__SHIFT) |
|
||||
(4 << S3C64XX_SROM_BCX__TCOS__SHIFT) |
|
||||
(0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1);
|
||||
|
||||
platform_add_devices(real6410_devices, ARRAY_SIZE(real6410_devices));
|
||||
}
|
||||
|
|
|
@ -280,6 +280,24 @@ static struct clk init_clocks_disable[] = {
|
|||
.parent = &clk_hclk_dsys.clk,
|
||||
.enable = s5pv210_clk_ip0_ctrl,
|
||||
.ctrlbit = (1<<29),
|
||||
}, {
|
||||
.name = "fimc",
|
||||
.id = 0,
|
||||
.parent = &clk_hclk_dsys.clk,
|
||||
.enable = s5pv210_clk_ip0_ctrl,
|
||||
.ctrlbit = (1 << 24),
|
||||
}, {
|
||||
.name = "fimc",
|
||||
.id = 1,
|
||||
.parent = &clk_hclk_dsys.clk,
|
||||
.enable = s5pv210_clk_ip0_ctrl,
|
||||
.ctrlbit = (1 << 25),
|
||||
}, {
|
||||
.name = "fimc",
|
||||
.id = 2,
|
||||
.parent = &clk_hclk_dsys.clk,
|
||||
.enable = s5pv210_clk_ip0_ctrl,
|
||||
.ctrlbit = (1 << 26),
|
||||
}, {
|
||||
.name = "otg",
|
||||
.id = -1,
|
||||
|
@ -357,7 +375,7 @@ static struct clk init_clocks_disable[] = {
|
|||
.id = 1,
|
||||
.parent = &clk_pclk_psys.clk,
|
||||
.enable = s5pv210_clk_ip3_ctrl,
|
||||
.ctrlbit = (1<<8),
|
||||
.ctrlbit = (1 << 10),
|
||||
}, {
|
||||
.name = "i2c",
|
||||
.id = 2,
|
||||
|
|
|
@ -47,7 +47,7 @@ static struct map_desc s5pv210_iodesc[] __initdata = {
|
|||
{
|
||||
.virtual = (unsigned long)S5P_VA_SYSTIMER,
|
||||
.pfn = __phys_to_pfn(S5PV210_PA_SYSTIMER),
|
||||
.length = SZ_1M,
|
||||
.length = SZ_4K,
|
||||
.type = MT_DEVICE,
|
||||
}, {
|
||||
.virtual = (unsigned long)VA_VIC2,
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
#
|
||||
|
||||
# Common objects
|
||||
obj-y := timer.o console.o clock.o
|
||||
obj-y := timer.o console.o clock.o pm_runtime.o
|
||||
|
||||
# CPU objects
|
||||
obj-$(CONFIG_ARCH_SH7367) += setup-sh7367.o clock-sh7367.o intc-sh7367.o
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
#include <linux/platform_device.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/mfd/sh_mobile_sdhi.h>
|
||||
#include <linux/mfd/tmio.h>
|
||||
#include <linux/mmc/host.h>
|
||||
#include <linux/mtd/mtd.h>
|
||||
#include <linux/mtd/partitions.h>
|
||||
|
@ -39,6 +40,7 @@
|
|||
#include <linux/sh_clk.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/input.h>
|
||||
#include <linux/leds.h>
|
||||
#include <linux/input/sh_keysc.h>
|
||||
#include <linux/usb/r8a66597.h>
|
||||
|
||||
|
@ -307,6 +309,7 @@ static struct sh_mobile_sdhi_info sdhi1_info = {
|
|||
.dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
|
||||
.dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
|
||||
.tmio_ocr_mask = MMC_VDD_165_195,
|
||||
.tmio_flags = TMIO_MMC_WRPROTECT_DISABLE,
|
||||
};
|
||||
|
||||
static struct resource sdhi1_resources[] = {
|
||||
|
@ -558,7 +561,7 @@ static struct resource fsi_resources[] = {
|
|||
|
||||
static struct platform_device fsi_device = {
|
||||
.name = "sh_fsi2",
|
||||
.id = 0,
|
||||
.id = -1,
|
||||
.num_resources = ARRAY_SIZE(fsi_resources),
|
||||
.resource = fsi_resources,
|
||||
.dev = {
|
||||
|
@ -650,7 +653,44 @@ static struct platform_device hdmi_device = {
|
|||
},
|
||||
};
|
||||
|
||||
static struct gpio_led ap4evb_leds[] = {
|
||||
{
|
||||
.name = "led4",
|
||||
.gpio = GPIO_PORT185,
|
||||
.default_state = LEDS_GPIO_DEFSTATE_ON,
|
||||
},
|
||||
{
|
||||
.name = "led2",
|
||||
.gpio = GPIO_PORT186,
|
||||
.default_state = LEDS_GPIO_DEFSTATE_ON,
|
||||
},
|
||||
{
|
||||
.name = "led3",
|
||||
.gpio = GPIO_PORT187,
|
||||
.default_state = LEDS_GPIO_DEFSTATE_ON,
|
||||
},
|
||||
{
|
||||
.name = "led1",
|
||||
.gpio = GPIO_PORT188,
|
||||
.default_state = LEDS_GPIO_DEFSTATE_ON,
|
||||
}
|
||||
};
|
||||
|
||||
static struct gpio_led_platform_data ap4evb_leds_pdata = {
|
||||
.num_leds = ARRAY_SIZE(ap4evb_leds),
|
||||
.leds = ap4evb_leds,
|
||||
};
|
||||
|
||||
static struct platform_device leds_device = {
|
||||
.name = "leds-gpio",
|
||||
.id = 0,
|
||||
.dev = {
|
||||
.platform_data = &ap4evb_leds_pdata,
|
||||
},
|
||||
};
|
||||
|
||||
static struct platform_device *ap4evb_devices[] __initdata = {
|
||||
&leds_device,
|
||||
&nor_flash_device,
|
||||
&smc911x_device,
|
||||
&sdhi0_device,
|
||||
|
@ -840,20 +880,6 @@ static void __init ap4evb_init(void)
|
|||
gpio_request(GPIO_FN_CS5A, NULL);
|
||||
gpio_request(GPIO_FN_IRQ6_39, NULL);
|
||||
|
||||
/* enable LED 1 - 4 */
|
||||
gpio_request(GPIO_PORT185, NULL);
|
||||
gpio_request(GPIO_PORT186, NULL);
|
||||
gpio_request(GPIO_PORT187, NULL);
|
||||
gpio_request(GPIO_PORT188, NULL);
|
||||
gpio_direction_output(GPIO_PORT185, 1);
|
||||
gpio_direction_output(GPIO_PORT186, 1);
|
||||
gpio_direction_output(GPIO_PORT187, 1);
|
||||
gpio_direction_output(GPIO_PORT188, 1);
|
||||
gpio_export(GPIO_PORT185, 0);
|
||||
gpio_export(GPIO_PORT186, 0);
|
||||
gpio_export(GPIO_PORT187, 0);
|
||||
gpio_export(GPIO_PORT188, 0);
|
||||
|
||||
/* enable Debug switch (S6) */
|
||||
gpio_request(GPIO_PORT32, NULL);
|
||||
gpio_request(GPIO_PORT33, NULL);
|
||||
|
|
|
@ -286,7 +286,6 @@ static struct clk_ops pllc2_clk_ops = {
|
|||
|
||||
struct clk pllc2_clk = {
|
||||
.ops = &pllc2_clk_ops,
|
||||
.flags = CLK_ENABLE_ON_INIT,
|
||||
.parent = &extal1_div2_clk,
|
||||
.freq_table = pllc2_freq_table,
|
||||
.parent_table = pllc2_parent,
|
||||
|
@ -395,7 +394,7 @@ static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = {
|
|||
|
||||
enum { MSTP001,
|
||||
MSTP131, MSTP130,
|
||||
MSTP129, MSTP128,
|
||||
MSTP129, MSTP128, MSTP127, MSTP126,
|
||||
MSTP118, MSTP117, MSTP116,
|
||||
MSTP106, MSTP101, MSTP100,
|
||||
MSTP223,
|
||||
|
@ -413,6 +412,8 @@ static struct clk mstp_clks[MSTP_NR] = {
|
|||
[MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */
|
||||
[MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */
|
||||
[MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* VEU0 */
|
||||
[MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU */
|
||||
[MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2 */
|
||||
[MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */
|
||||
[MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */
|
||||
[MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */
|
||||
|
@ -428,7 +429,7 @@ static struct clk mstp_clks[MSTP_NR] = {
|
|||
[MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */
|
||||
[MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
|
||||
[MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
|
||||
[MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, CLK_ENABLE_ON_INIT), /* FSIA */
|
||||
[MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSIA */
|
||||
[MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */
|
||||
[MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */
|
||||
[MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */
|
||||
|
@ -498,6 +499,8 @@ static struct clk_lookup lookups[] = {
|
|||
CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */
|
||||
CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */
|
||||
CLKDEV_DEV_ID("uio_pdrv_genirq.1", &mstp_clks[MSTP128]), /* VEU0 */
|
||||
CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU */
|
||||
CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2 */
|
||||
CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
|
||||
CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */
|
||||
CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */
|
||||
|
|
|
@ -1,8 +1,10 @@
|
|||
/*
|
||||
* SH-Mobile Timer
|
||||
* SH-Mobile Clock Framework
|
||||
*
|
||||
* Copyright (C) 2010 Magnus Damm
|
||||
*
|
||||
* Used together with arch/arm/common/clkdev.c and drivers/sh/clk.c.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; version 2 of the License.
|
||||
|
|
|
@ -0,0 +1,169 @@
|
|||
/*
|
||||
* arch/arm/mach-shmobile/pm_runtime.c
|
||||
*
|
||||
* Runtime PM support code for SuperH Mobile ARM
|
||||
*
|
||||
* Copyright (C) 2009-2010 Magnus Damm
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/sh_clk.h>
|
||||
#include <linux/bitmap.h>
|
||||
|
||||
#ifdef CONFIG_PM_RUNTIME
|
||||
#define BIT_ONCE 0
|
||||
#define BIT_ACTIVE 1
|
||||
#define BIT_CLK_ENABLED 2
|
||||
|
||||
struct pm_runtime_data {
|
||||
unsigned long flags;
|
||||
struct clk *clk;
|
||||
};
|
||||
|
||||
static void __devres_release(struct device *dev, void *res)
|
||||
{
|
||||
struct pm_runtime_data *prd = res;
|
||||
|
||||
dev_dbg(dev, "__devres_release()\n");
|
||||
|
||||
if (test_bit(BIT_CLK_ENABLED, &prd->flags))
|
||||
clk_disable(prd->clk);
|
||||
|
||||
if (test_bit(BIT_ACTIVE, &prd->flags))
|
||||
clk_put(prd->clk);
|
||||
}
|
||||
|
||||
static struct pm_runtime_data *__to_prd(struct device *dev)
|
||||
{
|
||||
return devres_find(dev, __devres_release, NULL, NULL);
|
||||
}
|
||||
|
||||
static void platform_pm_runtime_init(struct device *dev,
|
||||
struct pm_runtime_data *prd)
|
||||
{
|
||||
if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) {
|
||||
prd->clk = clk_get(dev, NULL);
|
||||
if (!IS_ERR(prd->clk)) {
|
||||
set_bit(BIT_ACTIVE, &prd->flags);
|
||||
dev_info(dev, "clocks managed by runtime pm\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void platform_pm_runtime_bug(struct device *dev,
|
||||
struct pm_runtime_data *prd)
|
||||
{
|
||||
if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags))
|
||||
dev_err(dev, "runtime pm suspend before resume\n");
|
||||
}
|
||||
|
||||
int platform_pm_runtime_suspend(struct device *dev)
|
||||
{
|
||||
struct pm_runtime_data *prd = __to_prd(dev);
|
||||
|
||||
dev_dbg(dev, "platform_pm_runtime_suspend()\n");
|
||||
|
||||
platform_pm_runtime_bug(dev, prd);
|
||||
|
||||
if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
|
||||
clk_disable(prd->clk);
|
||||
clear_bit(BIT_CLK_ENABLED, &prd->flags);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int platform_pm_runtime_resume(struct device *dev)
|
||||
{
|
||||
struct pm_runtime_data *prd = __to_prd(dev);
|
||||
|
||||
dev_dbg(dev, "platform_pm_runtime_resume()\n");
|
||||
|
||||
platform_pm_runtime_init(dev, prd);
|
||||
|
||||
if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
|
||||
clk_enable(prd->clk);
|
||||
set_bit(BIT_CLK_ENABLED, &prd->flags);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int platform_pm_runtime_idle(struct device *dev)
|
||||
{
|
||||
/* suspend synchronously to disable clocks immediately */
|
||||
return pm_runtime_suspend(dev);
|
||||
}
|
||||
|
||||
static int platform_bus_notify(struct notifier_block *nb,
|
||||
unsigned long action, void *data)
|
||||
{
|
||||
struct device *dev = data;
|
||||
struct pm_runtime_data *prd;
|
||||
|
||||
dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
|
||||
|
||||
if (action == BUS_NOTIFY_BIND_DRIVER) {
|
||||
prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL);
|
||||
if (prd)
|
||||
devres_add(dev, prd);
|
||||
else
|
||||
dev_err(dev, "unable to alloc memory for runtime pm\n");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#else /* CONFIG_PM_RUNTIME */
|
||||
|
||||
static int platform_bus_notify(struct notifier_block *nb,
|
||||
unsigned long action, void *data)
|
||||
{
|
||||
struct device *dev = data;
|
||||
struct clk *clk;
|
||||
|
||||
dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
|
||||
|
||||
switch (action) {
|
||||
case BUS_NOTIFY_BIND_DRIVER:
|
||||
clk = clk_get(dev, NULL);
|
||||
if (!IS_ERR(clk)) {
|
||||
clk_enable(clk);
|
||||
clk_put(clk);
|
||||
dev_info(dev, "runtime pm disabled, clock forced on\n");
|
||||
}
|
||||
break;
|
||||
case BUS_NOTIFY_UNBOUND_DRIVER:
|
||||
clk = clk_get(dev, NULL);
|
||||
if (!IS_ERR(clk)) {
|
||||
clk_disable(clk);
|
||||
clk_put(clk);
|
||||
dev_info(dev, "runtime pm disabled, clock forced off\n");
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_PM_RUNTIME */
|
||||
|
||||
static struct notifier_block platform_bus_notifier = {
|
||||
.notifier_call = platform_bus_notify
|
||||
};
|
||||
|
||||
static int __init sh_pm_runtime_init(void)
|
||||
{
|
||||
bus_register_notifier(&platform_bus_type, &platform_bus_notifier);
|
||||
return 0;
|
||||
}
|
||||
core_initcall(sh_pm_runtime_init);
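
As a minimal sketch of how a platform driver would lean on the notifier above (the driver name and probe body are hypothetical; only the runtime PM calls and the clock handling done by pm_runtime.c are taken from the code above), the usual pm_runtime calls are all that is needed once the notifier has attached the device's clock:

	/* Hypothetical consumer: the notifier above binds the clock on
	 * BUS_NOTIFY_BIND_DRIVER, so the driver only issues runtime PM calls. */
	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int example_probe(struct platform_device *pdev)
	{
		pm_runtime_enable(&pdev->dev);		/* allow runtime PM for this device */
		pm_runtime_get_sync(&pdev->dev);	/* resume -> clk_enable() in pm_runtime.c */

		/* ... set up and touch the hardware here ... */

		pm_runtime_put_sync(&pdev->dev);	/* last user gone -> clk_disable() */
		return 0;
	}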
|
|
@ -398,7 +398,7 @@ config CPU_V6
|
|||
# ARMv6k
|
||||
config CPU_32v6K
|
||||
bool "Support ARM V6K processor extensions" if !SMP
|
||||
depends on CPU_V6
|
||||
depends on CPU_V6 || CPU_V7
|
||||
default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
|
||||
help
|
||||
Say Y here if your ARMv6 processor supports the 'K' extension.
|
||||
|
|
|
@ -229,6 +229,8 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
|
|||
}
|
||||
} while (size -= PAGE_SIZE);
|
||||
|
||||
dsb();
|
||||
|
||||
return (void *)c->vm_start;
|
||||
}
|
||||
return NULL;
|
||||
|
|
|
@ -43,6 +43,7 @@ config ARCH_MXC91231
|
|||
config ARCH_MX5
|
||||
bool "MX5-based"
|
||||
select CPU_V7
|
||||
select ARM_L1_CACHE_SHIFT_6
|
||||
help
|
||||
This enables support for systems based on the Freescale i.MX51 family
|
||||
|
||||
|
|
|
@ -37,9 +37,9 @@
|
|||
* mach-mx5/eukrea_mbimx51-baseboard.c for cpuimx51
|
||||
*/
|
||||
|
||||
extern void eukrea_mbimx25_baseboard_init(void);
|
||||
extern void eukrea_mbimxsd25_baseboard_init(void);
|
||||
extern void eukrea_mbimx27_baseboard_init(void);
|
||||
extern void eukrea_mbimx35_baseboard_init(void);
|
||||
extern void eukrea_mbimxsd35_baseboard_init(void);
|
||||
extern void eukrea_mbimx51_baseboard_init(void);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -164,8 +164,9 @@ int tzic_enable_wake(int is_idle)
|
|||
return -EAGAIN;
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
v = is_idle ? __raw_readl(TZIC_ENSET0(i)) : wakeup_intr[i];
|
||||
__raw_writel(v, TZIC_WAKEUP0(i));
|
||||
v = is_idle ? __raw_readl(tzic_base + TZIC_ENSET0(i)) :
|
||||
wakeup_intr[i];
|
||||
__raw_writel(v, tzic_base + TZIC_WAKEUP0(i));
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -176,7 +176,7 @@ static inline void __add_pwm(struct pwm_device *pwm)
|
|||
|
||||
static int __devinit pwm_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct platform_device_id *id = platform_get_device_id(pdev);
|
||||
const struct platform_device_id *id = platform_get_device_id(pdev);
|
||||
struct pwm_device *pwm, *secondary = NULL;
|
||||
struct resource *r;
|
||||
int ret = 0;
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/ioport.h>
|
||||
|
@ -18,7 +19,7 @@
|
|||
static struct resource s5p_fimc0_resource[] = {
|
||||
[0] = {
|
||||
.start = S5P_PA_FIMC0,
|
||||
.end = S5P_PA_FIMC0 + SZ_1M - 1,
|
||||
.end = S5P_PA_FIMC0 + SZ_4K - 1,
|
||||
.flags = IORESOURCE_MEM,
|
||||
},
|
||||
[1] = {
|
||||
|
@ -28,9 +29,15 @@ static struct resource s5p_fimc0_resource[] = {
|
|||
},
|
||||
};
|
||||
|
||||
static u64 s5p_fimc0_dma_mask = DMA_BIT_MASK(32);
|
||||
|
||||
struct platform_device s5p_device_fimc0 = {
|
||||
.name = "s5p-fimc",
|
||||
.id = 0,
|
||||
.num_resources = ARRAY_SIZE(s5p_fimc0_resource),
|
||||
.resource = s5p_fimc0_resource,
|
||||
.dev = {
|
||||
.dma_mask = &s5p_fimc0_dma_mask,
|
||||
.coherent_dma_mask = DMA_BIT_MASK(32),
|
||||
},
|
||||
};
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/ioport.h>
|
||||
|
@ -18,7 +19,7 @@
|
|||
static struct resource s5p_fimc1_resource[] = {
|
||||
[0] = {
|
||||
.start = S5P_PA_FIMC1,
|
||||
.end = S5P_PA_FIMC1 + SZ_1M - 1,
|
||||
.end = S5P_PA_FIMC1 + SZ_4K - 1,
|
||||
.flags = IORESOURCE_MEM,
|
||||
},
|
||||
[1] = {
|
||||
|
@ -28,9 +29,15 @@ static struct resource s5p_fimc1_resource[] = {
|
|||
},
|
||||
};
|
||||
|
||||
static u64 s5p_fimc1_dma_mask = DMA_BIT_MASK(32);
|
||||
|
||||
struct platform_device s5p_device_fimc1 = {
|
||||
.name = "s5p-fimc",
|
||||
.id = 1,
|
||||
.num_resources = ARRAY_SIZE(s5p_fimc1_resource),
|
||||
.resource = s5p_fimc1_resource,
|
||||
.dev = {
|
||||
.dma_mask = &s5p_fimc1_dma_mask,
|
||||
.coherent_dma_mask = DMA_BIT_MASK(32),
|
||||
},
|
||||
};
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/ioport.h>
|
||||
|
@ -18,7 +19,7 @@
|
|||
static struct resource s5p_fimc2_resource[] = {
|
||||
[0] = {
|
||||
.start = S5P_PA_FIMC2,
|
||||
.end = S5P_PA_FIMC2 + SZ_1M - 1,
|
||||
.end = S5P_PA_FIMC2 + SZ_4K - 1,
|
||||
.flags = IORESOURCE_MEM,
|
||||
},
|
||||
[1] = {
|
||||
|
@ -28,9 +29,15 @@ static struct resource s5p_fimc2_resource[] = {
|
|||
},
|
||||
};
|
||||
|
||||
static u64 s5p_fimc2_dma_mask = DMA_BIT_MASK(32);
|
||||
|
||||
struct platform_device s5p_device_fimc2 = {
|
||||
.name = "s5p-fimc",
|
||||
.id = 2,
|
||||
.num_resources = ARRAY_SIZE(s5p_fimc2_resource),
|
||||
.resource = s5p_fimc2_resource,
|
||||
.dev = {
|
||||
.dma_mask = &s5p_fimc2_dma_mask,
|
||||
.coherent_dma_mask = DMA_BIT_MASK(32),
|
||||
},
|
||||
};
|
||||
|
|
|
@@ -273,13 +273,13 @@ s5p_gpio_drvstr_t s5p_gpio_get_drvstr(unsigned int pin)
	if (!chip)
		return -EINVAL;

	off = chip->chip.base - pin;
	off = pin - chip->chip.base;
	shift = off * 2;
	reg = chip->base + 0x0C;

	drvstr = __raw_readl(reg);
	drvstr = 0xffff & (0x3 << shift);
	drvstr = drvstr >> shift;
	drvstr &= 0x3;

	return (__force s5p_gpio_drvstr_t)drvstr;
}

@@ -296,11 +296,12 @@ int s5p_gpio_set_drvstr(unsigned int pin, s5p_gpio_drvstr_t drvstr)
	if (!chip)
		return -EINVAL;

	off = chip->chip.base - pin;
	off = pin - chip->chip.base;
	shift = off * 2;
	reg = chip->base + 0x0C;

	tmp = __raw_readl(reg);
	tmp &= ~(0x3 << shift);
	tmp |= drvstr << shift;

	__raw_writel(tmp, reg);
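
Both hunks above fix the same pair of bugs: the pin offset was computed as chip base minus pin instead of pin minus chip base, and the get path masked the register value before shifting. A small sketch of the corrected read path (the helper name is invented for illustration; the 2-bits-per-pin layout is assumed from the code above):

	/* Hypothetical helper mirroring the fixed logic: pull one pin's 2-bit
	 * drive-strength field out of the bank's DRV register value. */
	static unsigned int drvstr_field(unsigned int regval, unsigned int pin,
					 unsigned int chip_base)
	{
		unsigned int off = pin - chip_base;	/* pin index within the bank */
		unsigned int shift = off * 2;		/* two bits per pin */

		return (regval >> shift) & 0x3;		/* isolate this pin's field */
	}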
@@ -143,12 +143,12 @@ extern s3c_gpio_pull_t s3c_gpio_getpull(unsigned int pin);
/* Define values for the drvstr available for each gpio pin.
 *
 * These values control the value of the output signal driver strength,
 * configurable on most pins on the S5C series.
 * configurable on most pins on the S5P series.
 */
#define S5P_GPIO_DRVSTR_LV1	((__force s5p_gpio_drvstr_t)0x00)
#define S5P_GPIO_DRVSTR_LV2	((__force s5p_gpio_drvstr_t)0x01)
#define S5P_GPIO_DRVSTR_LV3	((__force s5p_gpio_drvstr_t)0x10)
#define S5P_GPIO_DRVSTR_LV4	((__force s5p_gpio_drvstr_t)0x11)
#define S5P_GPIO_DRVSTR_LV1	((__force s5p_gpio_drvstr_t)0x0)
#define S5P_GPIO_DRVSTR_LV2	((__force s5p_gpio_drvstr_t)0x2)
#define S5P_GPIO_DRVSTR_LV3	((__force s5p_gpio_drvstr_t)0x1)
#define S5P_GPIO_DRVSTR_LV4	((__force s5p_gpio_drvstr_t)0x3)

/**
 * s5p_gpio_get_drvstr() - get the driver strength value of a gpio pin
@ -12,7 +12,7 @@
|
|||
#
|
||||
# http://www.arm.linux.org.uk/developer/machines/?action=new
|
||||
#
|
||||
# Last update: Mon Jul 12 21:10:14 2010
|
||||
# Last update: Thu Sep 9 22:43:01 2010
|
||||
#
|
||||
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
|
||||
#
|
||||
|
@ -2622,7 +2622,7 @@ kraken MACH_KRAKEN KRAKEN 2634
|
|||
gw2388 MACH_GW2388 GW2388 2635
|
||||
jadecpu MACH_JADECPU JADECPU 2636
|
||||
carlisle MACH_CARLISLE CARLISLE 2637
|
||||
lux_sf9 MACH_LUX_SFT9 LUX_SFT9 2638
|
||||
lux_sf9 MACH_LUX_SF9 LUX_SF9 2638
|
||||
nemid_tb MACH_NEMID_TB NEMID_TB 2639
|
||||
terrier MACH_TERRIER TERRIER 2640
|
||||
turbot MACH_TURBOT TURBOT 2641
|
||||
|
@ -2950,3 +2950,97 @@ davinci_dm365_dvr MACH_DAVINCI_DM365_DVR DAVINCI_DM365_DVR 2963
|
|||
netviz MACH_NETVIZ NETVIZ 2964
|
||||
flexibity MACH_FLEXIBITY FLEXIBITY 2965
|
||||
wlan_computer MACH_WLAN_COMPUTER WLAN_COMPUTER 2966
|
||||
lpc24xx MACH_LPC24XX LPC24XX 2967
|
||||
spica MACH_SPICA SPICA 2968
|
||||
gpsdisplay MACH_GPSDISPLAY GPSDISPLAY 2969
|
||||
bipnet MACH_BIPNET BIPNET 2970
|
||||
overo_ctu_inertial MACH_OVERO_CTU_INERTIAL OVERO_CTU_INERTIAL 2971
|
||||
davinci_dm355_mmm MACH_DAVINCI_DM355_MMM DAVINCI_DM355_MMM 2972
|
||||
pc9260_v2 MACH_PC9260_V2 PC9260_V2 2973
|
||||
ptx7545 MACH_PTX7545 PTX7545 2974
|
||||
tm_efdc MACH_TM_EFDC TM_EFDC 2975
|
||||
omap3_waldo1 MACH_OMAP3_WALDO1 OMAP3_WALDO1 2977
|
||||
flyer MACH_FLYER FLYER 2978
|
||||
tornado3240 MACH_TORNADO3240 TORNADO3240 2979
|
||||
soli_01 MACH_SOLI_01 SOLI_01 2980
|
||||
omapl138_europalc MACH_OMAPL138_EUROPALC OMAPL138_EUROPALC 2981
|
||||
helios_v1 MACH_HELIOS_V1 HELIOS_V1 2982
|
||||
netspace_lite_v2 MACH_NETSPACE_LITE_V2 NETSPACE_LITE_V2 2983
|
||||
ssc MACH_SSC SSC 2984
|
||||
premierwave_en MACH_PREMIERWAVE_EN PREMIERWAVE_EN 2985
|
||||
wasabi MACH_WASABI WASABI 2986
|
||||
vivow MACH_VIVOW VIVOW 2987
|
||||
mx50_rdp MACH_MX50_RDP MX50_RDP 2988
|
||||
universal MACH_UNIVERSAL UNIVERSAL 2989
|
||||
real6410 MACH_REAL6410 REAL6410 2990
|
||||
spx_sakura MACH_SPX_SAKURA SPX_SAKURA 2991
|
||||
ij3k_2440 MACH_IJ3K_2440 IJ3K_2440 2992
|
||||
omap3_bc10 MACH_OMAP3_BC10 OMAP3_BC10 2993
|
||||
thebe MACH_THEBE THEBE 2994
|
||||
rv082 MACH_RV082 RV082 2995
|
||||
armlguest MACH_ARMLGUEST ARMLGUEST 2996
|
||||
tjinc1000 MACH_TJINC1000 TJINC1000 2997
|
||||
dockstar MACH_DOCKSTAR DOCKSTAR 2998
|
||||
ax8008 MACH_AX8008 AX8008 2999
|
||||
gnet_sgce MACH_GNET_SGCE GNET_SGCE 3000
|
||||
pxwnas_500_1000 MACH_PXWNAS_500_1000 PXWNAS_500_1000 3001
|
||||
ea20 MACH_EA20 EA20 3002
|
||||
awm2 MACH_AWM2 AWM2 3003
|
||||
ti8148evm MACH_TI8148EVM TI8148EVM 3004
|
||||
tegra_seaboard MACH_TEGRA_SEABOARD TEGRA_SEABOARD 3005
|
||||
linkstation_chlv2 MACH_LINKSTATION_CHLV2 LINKSTATION_CHLV2 3006
|
||||
tera_pro2_rack MACH_TERA_PRO2_RACK TERA_PRO2_RACK 3007
|
||||
rubys MACH_RUBYS RUBYS 3008
|
||||
aquarius MACH_AQUARIUS AQUARIUS 3009
|
||||
mx53_ard MACH_MX53_ARD MX53_ARD 3010
|
||||
mx53_smd MACH_MX53_SMD MX53_SMD 3011
|
||||
lswxl MACH_LSWXL LSWXL 3012
|
||||
dove_avng_v3 MACH_DOVE_AVNG_V3 DOVE_AVNG_V3 3013
|
||||
sdi_ess_9263 MACH_SDI_ESS_9263 SDI_ESS_9263 3014
|
||||
jocpu550 MACH_JOCPU550 JOCPU550 3015
|
||||
msm8x60_rumi3 MACH_MSM8X60_RUMI3 MSM8X60_RUMI3 3016
|
||||
msm8x60_ffa MACH_MSM8X60_FFA MSM8X60_FFA 3017
|
||||
yanomami MACH_YANOMAMI YANOMAMI 3018
|
||||
gta04 MACH_GTA04 GTA04 3019
|
||||
cm_a510 MACH_CM_A510 CM_A510 3020
|
||||
omap3_rfs200 MACH_OMAP3_RFS200 OMAP3_RFS200 3021
|
||||
kx33xx MACH_KX33XX KX33XX 3022
|
||||
ptx7510 MACH_PTX7510 PTX7510 3023
|
||||
top9000 MACH_TOP9000 TOP9000 3024
|
||||
teenote MACH_TEENOTE TEENOTE 3025
|
||||
ts3 MACH_TS3 TS3 3026
|
||||
a0 MACH_A0 A0 3027
|
||||
fsm9xxx_surf MACH_FSM9XXX_SURF FSM9XXX_SURF 3028
|
||||
fsm9xxx_ffa MACH_FSM9XXX_FFA FSM9XXX_FFA 3029
|
||||
frrhwcdma60w MACH_FRRHWCDMA60W FRRHWCDMA60W 3030
|
||||
remus MACH_REMUS REMUS 3031
|
||||
at91cap7xdk MACH_AT91CAP7XDK AT91CAP7XDK 3032
|
||||
at91cap7stk MACH_AT91CAP7STK AT91CAP7STK 3033
|
||||
kt_sbc_sam9_1 MACH_KT_SBC_SAM9_1 KT_SBC_SAM9_1 3034
|
||||
oratisrouter MACH_ORATISROUTER ORATISROUTER 3035
|
||||
armada_xp_db MACH_ARMADA_XP_DB ARMADA_XP_DB 3036
|
||||
spdm MACH_SPDM SPDM 3037
|
||||
gtib MACH_GTIB GTIB 3038
|
||||
dgm3240 MACH_DGM3240 DGM3240 3039
|
||||
atlas_i_lpe MACH_ATLAS_I_LPE ATLAS_I_LPE 3040
|
||||
htcmega MACH_HTCMEGA HTCMEGA 3041
|
||||
tricorder MACH_TRICORDER TRICORDER 3042
|
||||
tx28 MACH_TX28 TX28 3043
|
||||
bstbrd MACH_BSTBRD BSTBRD 3044
|
||||
pwb3090 MACH_PWB3090 PWB3090 3045
|
||||
idea6410 MACH_IDEA6410 IDEA6410 3046
|
||||
qbc9263 MACH_QBC9263 QBC9263 3047
|
||||
borabora MACH_BORABORA BORABORA 3048
|
||||
valdez MACH_VALDEZ VALDEZ 3049
|
||||
ls9g20 MACH_LS9G20 LS9G20 3050
|
||||
mios_v1 MACH_MIOS_V1 MIOS_V1 3051
|
||||
s5pc110_crespo MACH_S5PC110_CRESPO S5PC110_CRESPO 3052
|
||||
controltek9g20 MACH_CONTROLTEK9G20 CONTROLTEK9G20 3053
|
||||
tin307 MACH_TIN307 TIN307 3054
|
||||
tin510 MACH_TIN510 TIN510 3055
|
||||
bluecheese MACH_BLUECHEESE BLUECHEESE 3057
|
||||
tem3x30 MACH_TEM3X30 TEM3X30 3058
|
||||
harvest_desoto MACH_HARVEST_DESOTO HARVEST_DESOTO 3059
|
||||
msm8x60_qrdc MACH_MSM8X60_QRDC MSM8X60_QRDC 3060
|
||||
spear900 MACH_SPEAR900 SPEAR900 3061
|
||||
pcontrol_g20 MACH_PCONTROL_G20 PCONTROL_G20 3062
|
||||
|
|
|
@ -121,6 +121,9 @@ static int restore_sigcontext(struct sigcontext __user *sc, int *_gr8)
|
|||
struct user_context *user = current->thread.user;
|
||||
unsigned long tbr, psr;
|
||||
|
||||
/* Always make any pending restarted system calls return -EINTR */
|
||||
current_thread_info()->restart_block.fn = do_no_restart_syscall;
|
||||
|
||||
tbr = user->i.tbr;
|
||||
psr = user->i.psr;
|
||||
if (copy_from_user(user, &sc->sc_context, sizeof(sc->sc_context)))
|
||||
|
@ -250,6 +253,8 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
|
|||
struct sigframe __user *frame;
|
||||
int rsig;
|
||||
|
||||
set_fs(USER_DS);
|
||||
|
||||
frame = get_sigframe(ka, sizeof(*frame));
|
||||
|
||||
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
|
||||
|
@ -293,22 +298,23 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
|
|||
(unsigned long) (frame->retcode + 2));
|
||||
}
|
||||
|
||||
/* set up registers for signal handler */
|
||||
__frame->sp = (unsigned long) frame;
|
||||
__frame->lr = (unsigned long) &frame->retcode;
|
||||
__frame->gr8 = sig;
|
||||
|
||||
/* Set up registers for the signal handler */
|
||||
if (current->personality & FDPIC_FUNCPTRS) {
|
||||
struct fdpic_func_descriptor __user *funcptr =
|
||||
(struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
|
||||
__get_user(__frame->pc, &funcptr->text);
|
||||
__get_user(__frame->gr15, &funcptr->GOT);
|
||||
struct fdpic_func_descriptor desc;
|
||||
if (copy_from_user(&desc, funcptr, sizeof(desc)))
|
||||
goto give_sigsegv;
|
||||
__frame->pc = desc.text;
|
||||
__frame->gr15 = desc.GOT;
|
||||
} else {
|
||||
__frame->pc = (unsigned long) ka->sa.sa_handler;
|
||||
__frame->gr15 = 0;
|
||||
}
|
||||
|
||||
set_fs(USER_DS);
|
||||
__frame->sp = (unsigned long) frame;
|
||||
__frame->lr = (unsigned long) &frame->retcode;
|
||||
__frame->gr8 = sig;
|
||||
|
||||
/* the tracer may want to single-step inside the handler */
|
||||
if (test_thread_flag(TIF_SINGLESTEP))
|
||||
|
@ -323,7 +329,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
|
|||
return 0;
|
||||
|
||||
give_sigsegv:
|
||||
force_sig(SIGSEGV, current);
|
||||
force_sigsegv(sig, current);
|
||||
return -EFAULT;
|
||||
|
||||
} /* end setup_frame() */
|
||||
|
@ -338,6 +344,8 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
|||
struct rt_sigframe __user *frame;
|
||||
int rsig;
|
||||
|
||||
set_fs(USER_DS);
|
||||
|
||||
frame = get_sigframe(ka, sizeof(*frame));
|
||||
|
||||
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
|
||||
|
@ -392,22 +400,23 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
|||
}
|
||||
|
||||
/* Set up registers for signal handler */
|
||||
__frame->sp = (unsigned long) frame;
|
||||
__frame->lr = (unsigned long) &frame->retcode;
|
||||
__frame->gr8 = sig;
|
||||
__frame->gr9 = (unsigned long) &frame->info;
|
||||
|
||||
if (current->personality & FDPIC_FUNCPTRS) {
|
||||
struct fdpic_func_descriptor __user *funcptr =
|
||||
(struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
|
||||
__get_user(__frame->pc, &funcptr->text);
|
||||
__get_user(__frame->gr15, &funcptr->GOT);
|
||||
struct fdpic_func_descriptor desc;
|
||||
if (copy_from_user(&desc, funcptr, sizeof(desc)))
|
||||
goto give_sigsegv;
|
||||
__frame->pc = desc.text;
|
||||
__frame->gr15 = desc.GOT;
|
||||
} else {
|
||||
__frame->pc = (unsigned long) ka->sa.sa_handler;
|
||||
__frame->gr15 = 0;
|
||||
}
|
||||
|
||||
set_fs(USER_DS);
|
||||
__frame->sp = (unsigned long) frame;
|
||||
__frame->lr = (unsigned long) &frame->retcode;
|
||||
__frame->gr8 = sig;
|
||||
__frame->gr9 = (unsigned long) &frame->info;
|
||||
|
||||
/* the tracer may want to single-step inside the handler */
|
||||
if (test_thread_flag(TIF_SINGLESTEP))
|
||||
|
@ -422,7 +431,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
|
|||
return 0;
|
||||
|
||||
give_sigsegv:
|
||||
force_sig(SIGSEGV, current);
|
||||
force_sigsegv(sig, current);
|
||||
return -EFAULT;
|
||||
|
||||
} /* end setup_rt_frame() */
|
||||
|
@ -437,7 +446,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
|
|||
int ret;
|
||||
|
||||
/* Are we from a system call? */
|
||||
if (in_syscall(__frame)) {
|
||||
if (__frame->syscallno != -1) {
|
||||
/* If so, check system call restarting.. */
|
||||
switch (__frame->gr8) {
|
||||
case -ERESTART_RESTARTBLOCK:
|
||||
|
@ -456,6 +465,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
|
|||
__frame->gr8 = __frame->orig_gr8;
|
||||
__frame->pc -= 4;
|
||||
}
|
||||
__frame->syscallno = -1;
|
||||
}
|
||||
|
||||
/* Set up the stack frame */
|
||||
|
@ -538,10 +548,11 @@ no_signal:
|
|||
break;
|
||||
|
||||
case -ERESTART_RESTARTBLOCK:
|
||||
__frame->gr8 = __NR_restart_syscall;
|
||||
__frame->gr7 = __NR_restart_syscall;
|
||||
__frame->pc -= 4;
|
||||
break;
|
||||
}
|
||||
__frame->syscallno = -1;
|
||||
}
|
||||
|
||||
/* if there's no signal to deliver, we just put the saved sigmask
|
||||
|
|
|

@@ -18,7 +18,8 @@
 
 static __inline__ int atomic_add_return(int i, atomic_t *v)
 {
-	int ret,flags;
+	unsigned long flags;
+	int ret;
 	local_irq_save(flags);
 	ret = v->counter += i;
 	local_irq_restore(flags);
@@ -30,7 +31,8 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
 
 static __inline__ int atomic_sub_return(int i, atomic_t *v)
 {
-	int ret,flags;
+	unsigned long flags;
+	int ret;
 	local_irq_save(flags);
 	ret = v->counter -= i;
 	local_irq_restore(flags);
@@ -42,7 +44,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 
 static __inline__ int atomic_inc_return(atomic_t *v)
 {
-	int ret,flags;
+	unsigned long flags;
+	int ret;
 	local_irq_save(flags);
 	v->counter++;
 	ret = v->counter;
@@ -64,7 +67,8 @@ static __inline__ int atomic_inc_return(atomic_t *v)
 
 static __inline__ int atomic_dec_return(atomic_t *v)
 {
-	int ret,flags;
+	unsigned long flags;
+	int ret;
 	local_irq_save(flags);
 	--v->counter;
 	ret = v->counter;
@@ -76,7 +80,8 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 
 static __inline__ int atomic_dec_and_test(atomic_t *v)
 {
-	int ret,flags;
+	unsigned long flags;
+	int ret;
 	local_irq_save(flags);
 	--v->counter;
 	ret = v->counter;

@@ -3,6 +3,8 @@
 
 #include <linux/linkage.h>
 
+struct pt_regs;
+
 /*
  * switch_to(n) should switch tasks to task ptr, first checking that
  * ptr isn't the current task, in which case it does nothing. This
@@ -155,6 +157,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 
 #define arch_align_stack(x) (x)
 
-void die(char *str, struct pt_regs *fp, unsigned long err);
+extern void die(const char *str, struct pt_regs *fp, unsigned long err);
 
 #endif /* _H8300_SYSTEM_H */

@@ -56,8 +56,8 @@ int kernel_execve(const char *filename,
 		  const char *const envp[])
 {
 	register long res __asm__("er0");
-	register char *const *_c __asm__("er3") = envp;
-	register char *const *_b __asm__("er2") = argv;
+	register const char *const *_c __asm__("er3") = envp;
+	register const char *const *_b __asm__("er2") = argv;
 	register const char * _a __asm__("er1") = filename;
 	__asm__ __volatile__ ("mov.l %1,er0\n\t"
 			"trapa #0\n\t"

@@ -96,7 +96,7 @@ static void dump(struct pt_regs *fp)
 	printk("\n\n");
 }
 
-void die(char *str, struct pt_regs *fp, unsigned long err)
+void die(const char *str, struct pt_regs *fp, unsigned long err)
 {
 	static int diecount;
 

@@ -199,7 +199,7 @@ ptr_to_compat(void __user *uptr)
 }
 
 static __inline__ void __user *
-compat_alloc_user_space (long len)
+arch_compat_alloc_user_space (long len)
 {
 	struct pt_regs *regs = task_pt_regs(current);
 	return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);

@@ -420,22 +420,31 @@ EX(.fail_efault, ld8 r14=[r33])			// r14 <- *set
 	;;
 
 	RSM_PSR_I(p0, r18, r19)			// mask interrupt delivery
-	mov ar.ccv=0
 	andcm r14=r14,r17			// filter out SIGKILL & SIGSTOP
+	mov r8=EINVAL				// default to EINVAL
 
 #ifdef CONFIG_SMP
-	mov r17=1
+	// __ticket_spin_trylock(r31)
+	ld4 r17=[r31]
 	;;
-	cmpxchg4.acq r18=[r31],r17,ar.ccv	// try to acquire the lock
-	mov r8=EINVAL				// default to EINVAL
+	mov.m ar.ccv=r17
+	extr.u r9=r17,17,15
+	adds r19=1,r17
+	extr.u r18=r17,0,15
 	;;
+	cmp.eq p6,p7=r9,r18
+	;;
+(p6)	cmpxchg4.acq r9=[r31],r19,ar.ccv
+(p6)	dep.z r20=r19,1,15			// next serving ticket for unlock
+(p7)	br.cond.spnt.many .lock_contention
+	;;
+	cmp4.eq p0,p7=r9,r17
+	adds r31=2,r31
+(p7)	br.cond.spnt.many .lock_contention
 	ld8 r3=[r2]				// re-read current->blocked now that we hold the lock
-	cmp4.ne p6,p0=r18,r0
-(p6)	br.cond.spnt.many .lock_contention
 	;;
 #else
 	ld8 r3=[r2]				// re-read current->blocked now that we hold the lock
-	mov r8=EINVAL				// default to EINVAL
 #endif
 	add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16
 	add r19=IA64_TASK_SIGNAL_OFFSET,r16
@@ -490,7 +499,9 @@ EX(.fail_efault, ld8 r14=[r33])			// r14 <- *set
(p6)	br.cond.spnt.few 1b			// yes -> retry
 
 #ifdef CONFIG_SMP
-	st4.rel [r31]=r0			// release the lock
+	// __ticket_spin_unlock(r31)
+	st2.rel [r31]=r20
+	mov r20=0				// i must not leak kernel bits...
 #endif
 	SSM_PSR_I(p0, p9, r31)
 	;;
@@ -512,7 +523,8 @@ EX(.fail_efault, (p15) st8 [r34]=r3)
 
 .sig_pending:
 #ifdef CONFIG_SMP
-	st4.rel [r31]=r0			// release the lock
+	// __ticket_spin_unlock(r31)
+	st2.rel [r31]=r20			// release the lock
 #endif
 	SSM_PSR_I(p0, p9, r17)
 	;;

@@ -340,10 +340,13 @@
 #define __NR_set_thread_area	334
 #define __NR_atomic_cmpxchg_32	335
 #define __NR_atomic_barrier	336
+#define __NR_fanotify_init	337
+#define __NR_fanotify_mark	338
+#define __NR_prlimit64		339
 
 #ifdef __KERNEL__
 
-#define NR_syscalls		337
+#define NR_syscalls		340
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR

@@ -765,4 +765,7 @@ sys_call_table:
 	.long sys_set_thread_area
 	.long sys_atomic_cmpxchg_32	/* 335 */
 	.long sys_atomic_barrier
+	.long sys_fanotify_init
+	.long sys_fanotify_mark
+	.long sys_prlimit64
 

@@ -355,6 +355,9 @@ ENTRY(sys_call_table)
 	.long sys_set_thread_area
 	.long sys_atomic_cmpxchg_32	/* 335 */
 	.long sys_atomic_barrier
+	.long sys_fanotify_init
+	.long sys_fanotify_mark
+	.long sys_prlimit64
 
 	.rept NR_syscalls-(.-sys_call_table)/4
 		.long sys_ni_syscall

@@ -150,6 +150,8 @@ SECTIONS {
 		_sdata = . ;
 		DATA_DATA
 		CACHELINE_ALIGNED_DATA(32)
+		PAGE_ALIGNED_DATA(PAGE_SIZE)
+		*(.data..shared_aligned)
 		INIT_TASK_DATA(THREAD_SIZE)
 		_edata = . ;
 	} > DATA

@@ -145,7 +145,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 	return (u32)(unsigned long)uptr;
 }
 
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
 {
 	struct pt_regs *regs = (struct pt_regs *)
 		((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1;

@@ -156,17 +156,17 @@ struct mn10300_serial_port mn10300_serial_port_sif0 = {
 	._intr		= &SC0ICR,
 	._rxb		= &SC0RXB,
 	._txb		= &SC0TXB,
-	.rx_name	= "ttySM0/Rx",
-	.tx_name	= "ttySM0/Tx",
+	.rx_name	= "ttySM0:Rx",
+	.tx_name	= "ttySM0:Tx",
 #ifdef CONFIG_MN10300_TTYSM0_TIMER8
-	.tm_name	= "ttySM0/Timer8",
+	.tm_name	= "ttySM0:Timer8",
 	._tmxmd		= &TM8MD,
 	._tmxbr		= &TM8BR,
 	._tmicr		= &TM8ICR,
 	.tm_irq		= TM8IRQ,
 	.div_timer	= MNSCx_DIV_TIMER_16BIT,
 #else /* CONFIG_MN10300_TTYSM0_TIMER2 */
-	.tm_name	= "ttySM0/Timer2",
+	.tm_name	= "ttySM0:Timer2",
 	._tmxmd		= &TM2MD,
 	._tmxbr		= (volatile u16 *) &TM2BR,
 	._tmicr		= &TM2ICR,
@@ -209,17 +209,17 @@ struct mn10300_serial_port mn10300_serial_port_sif1 = {
 	._intr		= &SC1ICR,
 	._rxb		= &SC1RXB,
 	._txb		= &SC1TXB,
-	.rx_name	= "ttySM1/Rx",
-	.tx_name	= "ttySM1/Tx",
+	.rx_name	= "ttySM1:Rx",
+	.tx_name	= "ttySM1:Tx",
 #ifdef CONFIG_MN10300_TTYSM1_TIMER9
-	.tm_name	= "ttySM1/Timer9",
+	.tm_name	= "ttySM1:Timer9",
 	._tmxmd		= &TM9MD,
 	._tmxbr		= &TM9BR,
 	._tmicr		= &TM9ICR,
 	.tm_irq		= TM9IRQ,
 	.div_timer	= MNSCx_DIV_TIMER_16BIT,
 #else /* CONFIG_MN10300_TTYSM1_TIMER3 */
-	.tm_name	= "ttySM1/Timer3",
+	.tm_name	= "ttySM1:Timer3",
 	._tmxmd		= &TM3MD,
 	._tmxbr		= (volatile u16 *) &TM3BR,
 	._tmicr		= &TM3ICR,
@@ -260,9 +260,9 @@ struct mn10300_serial_port mn10300_serial_port_sif2 = {
 	.uart.lock	=
 		__SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock),
 	.name		= "ttySM2",
-	.rx_name	= "ttySM2/Rx",
-	.tx_name	= "ttySM2/Tx",
-	.tm_name	= "ttySM2/Timer10",
+	.rx_name	= "ttySM2:Rx",
+	.tx_name	= "ttySM2:Tx",
+	.tm_name	= "ttySM2:Timer10",
 	._iobase	= &SC2CTR,
 	._control	= &SC2CTR,
 	._status	= &SC2STR,

@@ -147,7 +147,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 	return (u32)(unsigned long)uptr;
 }
 
-static __inline__ void __user *compat_alloc_user_space(long len)
+static __inline__ void __user *arch_compat_alloc_user_space(long len)
 {
 	struct pt_regs *regs = &current->thread.regs;
 	return (void __user *)regs->gr[30];

@@ -134,7 +134,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 	return (u32)(unsigned long)uptr;
 }
 
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
 {
 	struct pt_regs *regs = current->thread.regs;
 	unsigned long usp = regs->gpr[1];

@@ -11,6 +11,7 @@
 #ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
 #define __ARCH_POWERPC_ASM_FSLDMA_H__
 
+#include <linux/slab.h>
 #include <linux/dmaengine.h>
 
 /*

@@ -575,13 +575,19 @@ __secondary_start:
 	/* Initialize the kernel stack. Just a repeat for iSeries. */
 	LOAD_REG_ADDR(r3, current_set)
 	sldi	r28,r24,3		/* get current_set[cpu#] */
-	ldx	r1,r3,r28
-	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
-	std	r1,PACAKSAVE(r13)
+	ldx	r14,r3,r28
+	addi	r14,r14,THREAD_SIZE-STACK_FRAME_OVERHEAD
+	std	r14,PACAKSAVE(r13)
 
 	/* Do early setup for that CPU (stab, slb, hash table pointer) */
 	bl	.early_setup_secondary
 
+	/*
+	 * setup the new stack pointer, but *don't* use this until
+	 * translation is on.
+	 */
+	mr	r1, r14
+
 	/* Clear backchain so we get nice backtraces */
 	li	r7,0
 	mtlr	r7

@@ -810,6 +810,9 @@ relocate_new_kernel:
 	isync
 	sync
 
+	mfspr	r3, SPRN_PIR		/* current core we are running on */
+	mr	r4, r5			/* load physical address of chunk called */
+
 	/* jump to the entry point, usually the setup routine */
 	mtlr	r5
 	blrl

@@ -577,20 +577,11 @@ void timer_interrupt(struct pt_regs * regs)
 	 * some CPUs will continuue to take decrementer exceptions */
 	set_dec(DECREMENTER_MAX);
 
-#ifdef CONFIG_PPC32
+#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
 		do_IRQ(regs);
 #endif
 
-	now = get_tb_or_rtc();
-	if (now < decrementer->next_tb) {
-		/* not time for this event yet */
-		now = decrementer->next_tb - now;
-		if (now <= DECREMENTER_MAX)
-			set_dec((int)now);
-		trace_timer_interrupt_exit(regs);
-		return;
-	}
 	old_regs = set_irq_regs(regs);
 	irq_enter();
 
@@ -606,8 +597,16 @@
 		get_lppaca()->int_dword.fields.decr_int = 0;
 #endif
 
-	if (evt->event_handler)
-		evt->event_handler(evt);
+	now = get_tb_or_rtc();
+	if (now >= decrementer->next_tb) {
+		decrementer->next_tb = ~(u64)0;
+		if (evt->event_handler)
+			evt->event_handler(evt);
+	} else {
+		now = decrementer->next_tb - now;
+		if (now <= DECREMENTER_MAX)
+			set_dec((int)now);
+	}
 
 #ifdef CONFIG_PPC_ISERIES
 	if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())

@@ -48,8 +48,10 @@ static int mpc837xmds_usb_cfg(void)
 		return -1;
 
 	np = of_find_node_by_name(NULL, "usb");
-	if (!np)
-		return -ENODEV;
+	if (!np) {
+		ret = -ENODEV;
+		goto out;
+	}
 	phy_type = of_get_property(np, "phy_type", NULL);
 	if (phy_type && !strcmp(phy_type, "ulpi")) {
 		clrbits8(bcsr_regs + 12, BCSR12_USB_SER_PIN);
@@ -65,8 +67,9 @@ static int mpc837xmds_usb_cfg(void)
 	}
 
 	of_node_put(np);
+out:
 	iounmap(bcsr_regs);
-	return 0;
+	return ret;
 }
 
 /* ************************************************************************
Some files were not shown because too many files have changed in this diff.