Merge branch 'fbdev/stable-updates'

This commit is contained in:
Paul Mundt 2011-06-24 17:16:40 +09:00
commit 20733d59d5
363 changed files with 5148 additions and 2311 deletions

View File

@ -0,0 +1,56 @@
What: /sys/class/backlight/<backlight>/<ambient light zone>_max
What: /sys/class/backlight/<backlight>/l1_daylight_max
What: /sys/class/backlight/<backlight>/l2_bright_max
What: /sys/class/backlight/<backlight>/l3_office_max
What: /sys/class/backlight/<backlight>/l4_indoor_max
What: /sys/class/backlight/<backlight>/l5_dark_max
Date: May 2011
KernelVersion: 2.6.40
Contact: device-drivers-devel@blackfin.uclinux.org
Description:
Control the maximum brightness for <ambient light zone>
on this <backlight>. Values are between 0 and 127. This file
will also show the brightness level stored for this
<ambient light zone>.
What: /sys/class/backlight/<backlight>/<ambient light zone>_dim
What: /sys/class/backlight/<backlight>/l2_bright_dim
What: /sys/class/backlight/<backlight>/l3_office_dim
What: /sys/class/backlight/<backlight>/l4_indoor_dim
What: /sys/class/backlight/<backlight>/l5_dark_dim
Date: May 2011
KernelVersion: 2.6.40
Contact: device-drivers-devel@blackfin.uclinux.org
Description:
Control the dim brightness for <ambient light zone>
on this <backlight>. Values are between 0 and 127, typically
set to 0. Full off when the backlight is disabled.
This file will also show the dim brightness level stored for
this <ambient light zone>.
What: /sys/class/backlight/<backlight>/ambient_light_level
Date: May 2011
KernelVersion: 2.6.40
Contact: device-drivers-devel@blackfin.uclinux.org
Description:
Get conversion value of the light sensor.
This value is updated every 80 ms (when the light sensor
is enabled). Returns integer between 0 (dark) and
8000 (max ambient brightness)
What: /sys/class/backlight/<backlight>/ambient_light_zone
Date: May 2011
KernelVersion: 2.6.40
Contact: device-drivers-devel@blackfin.uclinux.org
Description:
Get/Set current ambient light zone. Reading returns
integer between 1..5 (1 = daylight, 2 = bright, ..., 5 = dark).
Writing a value between 1..5 forces the backlight controller
to enter the corresponding ambient light zone.
Writing 0 returns to normal/automatic ambient light level
operation. The ambient light sensing feature on these devices
is an extension to the API documented in
Documentation/ABI/stable/sysfs-class-backlight.
It can be enabled by writing the value stored in
/sys/class/backlight/<backlight>/max_brightness to
/sys/class/backlight/<backlight>/brightness.
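As an illustrative, untested sequence tying these files together (using the generic <backlight> placeholder for the actual device name):
# cd /sys/class/backlight/<backlight>
# cat max_brightness > brightness # enable ambient light sensing
# cat ambient_light_level # raw sensor reading, 0..8000
# cat ambient_light_zone # current zone, 1..5
# echo 3 > ambient_light_zone # force the l3 "office" zone
# echo 0 > ambient_light_zone # return to automatic operation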

View File

@ -21,7 +21,7 @@ information will not be available.
To extract cgroup statistics a utility very similar to getdelays.c
has been developed; the sample output of the utility is shown below
~/balbir/cgroupstats # ./getdelays -C "/cgroup/a"
~/balbir/cgroupstats # ./getdelays -C "/sys/fs/cgroup/a"
sleeping 1, blocked 0, running 1, stopped 0, uninterruptible 0
~/balbir/cgroupstats # ./getdelays -C "/cgroup"
~/balbir/cgroupstats # ./getdelays -C "/sys/fs/cgroup"
sleeping 155, blocked 0, running 1, stopped 0, uninterruptible 2

View File

@ -28,16 +28,19 @@ cgroups. Here is what you can do.
- Enable group scheduling in CFQ
CONFIG_CFQ_GROUP_IOSCHED=y
- Compile and boot into kernel and mount IO controller (blkio).
- Compile and boot into kernel and mount IO controller (blkio); see
cgroups.txt, Why are cgroups needed?.
mount -t cgroup -o blkio none /cgroup
mount -t tmpfs cgroup_root /sys/fs/cgroup
mkdir /sys/fs/cgroup/blkio
mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
- Create two cgroups
mkdir -p /cgroup/test1/ /cgroup/test2
mkdir -p /sys/fs/cgroup/blkio/test1/ /sys/fs/cgroup/blkio/test2
- Set weights of group test1 and test2
echo 1000 > /cgroup/test1/blkio.weight
echo 500 > /cgroup/test2/blkio.weight
echo 1000 > /sys/fs/cgroup/blkio/test1/blkio.weight
echo 500 > /sys/fs/cgroup/blkio/test2/blkio.weight
- Create two same size files (say 512MB each) on same disk (file1, file2) and
launch two dd threads in different cgroup to read those files.
@ -46,12 +49,12 @@ cgroups. Here is what you can do.
echo 3 > /proc/sys/vm/drop_caches
dd if=/mnt/sdb/zerofile1 of=/dev/null &
echo $! > /cgroup/test1/tasks
cat /cgroup/test1/tasks
echo $! > /sys/fs/cgroup/blkio/test1/tasks
cat /sys/fs/cgroup/blkio/test1/tasks
dd if=/mnt/sdb/zerofile2 of=/dev/null &
echo $! > /cgroup/test2/tasks
cat /cgroup/test2/tasks
echo $! > /sys/fs/cgroup/blkio/test2/tasks
cat /sys/fs/cgroup/blkio/test2/tasks
- At macro level, first dd should finish first. To get more precise data, keep
on looking at (with the help of script), at blkio.disk_time and
@ -68,13 +71,13 @@ Throttling/Upper Limit policy
- Enable throttling in block layer
CONFIG_BLK_DEV_THROTTLING=y
- Mount blkio controller
mount -t cgroup -o blkio none /cgroup/blkio
- Mount blkio controller (see cgroups.txt, Why are cgroups needed?)
mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
- Specify a bandwidth rate on particular device for root group. The format
for policy is "<major>:<minor> <bytes_per_second>".
echo "8:16 1048576" > /cgroup/blkio/blkio.read_bps_device
echo "8:16 1048576" > /sys/fs/cgroup/blkio/blkio.read_bps_device
Above will put a limit of 1MB/second on reads happening for root group
on device having major/minor number 8:16.
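The configured rule can be read back from the same file; a sketch of what this might look like (exact output formatting may vary between kernel versions):
cat /sys/fs/cgroup/blkio/blkio.read_bps_device
8:16	1048576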
@ -108,7 +111,7 @@ Hierarchical Cgroups
CFQ and throttling will practically treat all groups at same level.
pivot
/ | \ \
/ / \ \
root test1 test2 test3
Down the line we can implement hierarchical accounting/control support
@ -149,7 +152,7 @@ Proportional weight policy files
Following is the format.
#echo dev_maj:dev_minor weight > /path/to/cgroup/blkio.weight_device
# echo dev_maj:dev_minor weight > blkio.weight_device
Configure weight=300 on /dev/sdb (8:16) in this cgroup
# echo 8:16 300 > blkio.weight_device
# cat blkio.weight_device

View File

@ -138,11 +138,11 @@ With the ability to classify tasks differently for different resources
the admin can easily set up a script which receives exec notifications
and depending on who is launching the browser he can
# echo browser_pid > /mnt/<restype>/<userclass>/tasks
# echo browser_pid > /sys/fs/cgroup/<restype>/<userclass>/tasks
With only a single hierarchy, he now would potentially have to create
a separate cgroup for every browser launched and associate it with
approp network and other resource class. This may lead to
appropriate network and other resource class. This may lead to
proliferation of such cgroups.
Also lets say that the administrator would like to give enhanced network
@ -153,9 +153,9 @@ apps enhanced CPU power,
With ability to write pids directly to resource classes, it's just a
matter of :
# echo pid > /mnt/network/<new_class>/tasks
# echo pid > /sys/fs/cgroup/network/<new_class>/tasks
(after some time)
# echo pid > /mnt/network/<orig_class>/tasks
# echo pid > /sys/fs/cgroup/network/<orig_class>/tasks
Without this ability, he would have to split the cgroup into
multiple separate ones and then associate the new cgroups with the
@ -310,21 +310,24 @@ subsystem, this is the case for the cpuset.
To start a new job that is to be contained within a cgroup, using
the "cpuset" cgroup subsystem, the steps are something like:
1) mkdir /dev/cgroup
2) mount -t cgroup -ocpuset cpuset /dev/cgroup
3) Create the new cgroup by doing mkdir's and write's (or echo's) in
the /dev/cgroup virtual file system.
4) Start a task that will be the "founding father" of the new job.
5) Attach that task to the new cgroup by writing its pid to the
/dev/cgroup tasks file for that cgroup.
6) fork, exec or clone the job tasks from this founding father task.
1) mount -t tmpfs cgroup_root /sys/fs/cgroup
2) mkdir /sys/fs/cgroup/cpuset
3) mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset
4) Create the new cgroup by doing mkdir's and write's (or echo's) in
the /sys/fs/cgroup virtual file system.
5) Start a task that will be the "founding father" of the new job.
6) Attach that task to the new cgroup by writing its pid to the
/sys/fs/cgroup/cpuset/tasks file for that cgroup.
7) fork, exec or clone the job tasks from this founding father task.
For example, the following sequence of commands will setup a cgroup
named "Charlie", containing just CPUs 2 and 3, and Memory Node 1,
and then start a subshell 'sh' in that cgroup:
mount -t cgroup cpuset -ocpuset /dev/cgroup
cd /dev/cgroup
mount -t tmpfs cgroup_root /sys/fs/cgroup
mkdir /sys/fs/cgroup/cpuset
mount -t cgroup cpuset -ocpuset /sys/fs/cgroup/cpuset
cd /sys/fs/cgroup/cpuset
mkdir Charlie
cd Charlie
/bin/echo 2-3 > cpuset.cpus
@ -345,7 +348,7 @@ Creating, modifying, using the cgroups can be done through the cgroup
virtual filesystem.
To mount a cgroup hierarchy with all available subsystems, type:
# mount -t cgroup xxx /dev/cgroup
# mount -t cgroup xxx /sys/fs/cgroup
The "xxx" is not interpreted by the cgroup code, but will appear in
/proc/mounts so may be any useful identifying string that you like.
@ -354,23 +357,32 @@ Note: Some subsystems do not work without some user input first. For instance,
if cpusets are enabled the user will have to populate the cpus and mems files
for each new cgroup created before that group can be used.
As explained in section `1.2 Why are cgroups needed?' you should create
different hierarchies of cgroups for each single resource or group of
resources you want to control. Therefore, you should mount a tmpfs on
/sys/fs/cgroup and create directories for each cgroup resource or resource
group.
# mount -t tmpfs cgroup_root /sys/fs/cgroup
# mkdir /sys/fs/cgroup/rg1
To mount a cgroup hierarchy with just the cpuset and memory
subsystems, type:
# mount -t cgroup -o cpuset,memory hier1 /dev/cgroup
# mount -t cgroup -o cpuset,memory hier1 /sys/fs/cgroup/rg1
To change the set of subsystems bound to a mounted hierarchy, just
remount with different options:
# mount -o remount,cpuset,blkio hier1 /dev/cgroup
# mount -o remount,cpuset,blkio hier1 /sys/fs/cgroup/rg1
Now memory is removed from the hierarchy and blkio is added.
Note this will add blkio to the hierarchy but won't remove memory or
cpuset, because the new options are appended to the old ones:
# mount -o remount,blkio /dev/cgroup
# mount -o remount,blkio /sys/fs/cgroup/rg1
To specify a hierarchy's release_agent:
# mount -t cgroup -o cpuset,release_agent="/sbin/cpuset_release_agent" \
xxx /dev/cgroup
xxx /sys/fs/cgroup/rg1
Note that specifying 'release_agent' more than once will return failure.
@ -379,17 +391,17 @@ when the hierarchy consists of a single (root) cgroup. Supporting
the ability to arbitrarily bind/unbind subsystems from an existing
cgroup hierarchy is intended to be implemented in the future.
Then under /dev/cgroup you can find a tree that corresponds to the
tree of the cgroups in the system. For instance, /dev/cgroup
Then under /sys/fs/cgroup/rg1 you can find a tree that corresponds to the
tree of the cgroups in the system. For instance, /sys/fs/cgroup/rg1
is the cgroup that holds the whole system.
If you want to change the value of release_agent:
# echo "/sbin/new_release_agent" > /dev/cgroup/release_agent
# echo "/sbin/new_release_agent" > /sys/fs/cgroup/rg1/release_agent
It can also be changed via remount.
If you want to create a new cgroup under /dev/cgroup:
# cd /dev/cgroup
If you want to create a new cgroup under /sys/fs/cgroup/rg1:
# cd /sys/fs/cgroup/rg1
# mkdir my_cgroup
Now you want to do something with this cgroup.
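For instance, one typical next step (sketched here) is to move the current shell into the new cgroup and confirm it landed there:
# echo $$ > my_cgroup/tasks
# cat my_cgroup/tasks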

View File

@ -10,26 +10,25 @@ directly present in its group.
Accounting groups can be created by first mounting the cgroup filesystem.
# mkdir /cgroups
# mount -t cgroup -ocpuacct none /cgroups
# mount -t cgroup -ocpuacct none /sys/fs/cgroup
With the above step, the initial or the parent accounting group
becomes visible at /cgroups. At bootup, this group includes all the
tasks in the system. /cgroups/tasks lists the tasks in this cgroup.
/cgroups/cpuacct.usage gives the CPU time (in nanoseconds) obtained by
this group which is essentially the CPU time obtained by all the tasks
With the above step, the initial or the parent accounting group becomes
visible at /sys/fs/cgroup. At bootup, this group includes all the tasks in
the system. /sys/fs/cgroup/tasks lists the tasks in this cgroup.
/sys/fs/cgroup/cpuacct.usage gives the CPU time (in nanoseconds) obtained
by this group which is essentially the CPU time obtained by all the tasks
in the system.
New accounting groups can be created under the parent group /cgroups.
New accounting groups can be created under the parent group /sys/fs/cgroup.
# cd /cgroups
# cd /sys/fs/cgroup
# mkdir g1
# echo $$ > g1
The above steps create a new group g1 and move the current shell
process (bash) into it. CPU time consumed by this bash and its children
can be obtained from g1/cpuacct.usage and the same is accumulated in
/cgroups/cpuacct.usage also.
/sys/fs/cgroup/cpuacct.usage also.
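A quick way to observe this (the numbers below are purely illustrative):
# cat g1/cpuacct.usage
162934784
# cat /sys/fs/cgroup/cpuacct.usage
2787403284090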
cpuacct.stat file lists a few statistics which further divide the
CPU time obtained by the cgroup into user and system times. Currently

View File

@ -661,21 +661,21 @@ than stress the kernel.
To start a new job that is to be contained within a cpuset, the steps are:
1) mkdir /dev/cpuset
2) mount -t cgroup -ocpuset cpuset /dev/cpuset
1) mkdir /sys/fs/cgroup/cpuset
2) mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset
3) Create the new cpuset by doing mkdir's and write's (or echo's) in
the /dev/cpuset virtual file system.
the /sys/fs/cgroup/cpuset virtual file system.
4) Start a task that will be the "founding father" of the new job.
5) Attach that task to the new cpuset by writing its pid to the
/dev/cpuset tasks file for that cpuset.
/sys/fs/cgroup/cpuset tasks file for that cpuset.
6) fork, exec or clone the job tasks from this founding father task.
For example, the following sequence of commands will setup a cpuset
named "Charlie", containing just CPUs 2 and 3, and Memory Node 1,
and then start a subshell 'sh' in that cpuset:
mount -t cgroup -ocpuset cpuset /dev/cpuset
cd /dev/cpuset
mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset
cd /sys/fs/cgroup/cpuset
mkdir Charlie
cd Charlie
/bin/echo 2-3 > cpuset.cpus
@ -710,14 +710,14 @@ Creating, modifying, using the cpusets can be done through the cpuset
virtual filesystem.
To mount it, type:
# mount -t cgroup -o cpuset cpuset /dev/cpuset
# mount -t cgroup -o cpuset cpuset /sys/fs/cgroup/cpuset
Then under /dev/cpuset you can find a tree that corresponds to the
tree of the cpusets in the system. For instance, /dev/cpuset
Then under /sys/fs/cgroup/cpuset you can find a tree that corresponds to the
tree of the cpusets in the system. For instance, /sys/fs/cgroup/cpuset
is the cpuset that holds the whole system.
If you want to create a new cpuset under /dev/cpuset:
# cd /dev/cpuset
If you want to create a new cpuset under /sys/fs/cgroup/cpuset:
# cd /sys/fs/cgroup/cpuset
# mkdir my_cpuset
Now you want to do something with this cpuset.
@ -765,12 +765,12 @@ wrapper around the cgroup filesystem.
The command
mount -t cpuset X /dev/cpuset
mount -t cpuset X /sys/fs/cgroup/cpuset
is equivalent to
mount -t cgroup -ocpuset,noprefix X /dev/cpuset
echo "/sbin/cpuset_release_agent" > /dev/cpuset/release_agent
mount -t cgroup -ocpuset,noprefix X /sys/fs/cgroup/cpuset
echo "/sbin/cpuset_release_agent" > /sys/fs/cgroup/cpuset/release_agent
2.2 Adding/removing cpus
------------------------

View File

@ -22,16 +22,16 @@ removed from the child(ren).
An entry is added using devices.allow, and removed using
devices.deny. For instance
echo 'c 1:3 mr' > /cgroups/1/devices.allow
echo 'c 1:3 mr' > /sys/fs/cgroup/1/devices.allow
allows cgroup 1 to read and mknod the device usually known as
/dev/null. Doing
echo a > /cgroups/1/devices.deny
echo a > /sys/fs/cgroup/1/devices.deny
will remove the default 'a *:* rwm' entry. Doing
echo a > /cgroups/1/devices.allow
echo a > /sys/fs/cgroup/1/devices.allow
will add the 'a *:* rwm' entry to the whitelist.
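As a further illustration of the same entry format, granting cgroup 1 read, write and mknod access to /dev/zero (char device 1:5) would be:
echo 'c 1:5 rwm' > /sys/fs/cgroup/1/devices.allow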

View File

@ -59,28 +59,28 @@ is non-freezable.
* Examples of usage :
# mkdir /containers
# mount -t cgroup -ofreezer freezer /containers
# mkdir /containers/0
# echo $some_pid > /containers/0/tasks
# mkdir /sys/fs/cgroup/freezer
# mount -t cgroup -ofreezer freezer /sys/fs/cgroup/freezer
# mkdir /sys/fs/cgroup/freezer/0
# echo $some_pid > /sys/fs/cgroup/freezer/0/tasks
to get status of the freezer subsystem :
# cat /containers/0/freezer.state
# cat /sys/fs/cgroup/freezer/0/freezer.state
THAWED
to freeze all tasks in the container :
# echo FROZEN > /containers/0/freezer.state
# cat /containers/0/freezer.state
# echo FROZEN > /sys/fs/cgroup/freezer/0/freezer.state
# cat /sys/fs/cgroup/freezer/0/freezer.state
FREEZING
# cat /containers/0/freezer.state
# cat /sys/fs/cgroup/freezer/0/freezer.state
FROZEN
to unfreeze all tasks in the container :
# echo THAWED > /containers/0/freezer.state
# cat /containers/0/freezer.state
# echo THAWED > /sys/fs/cgroup/freezer/0/freezer.state
# cat /sys/fs/cgroup/freezer/0/freezer.state
THAWED
This is the basic mechanism which should do the right thing for user space task

View File

@ -1,8 +1,8 @@
Memory Resource Controller
NOTE: The Memory Resource Controller has been generically been referred
to as the memory controller in this document. Do not confuse memory
controller used here with the memory controller that is used in hardware.
NOTE: The Memory Resource Controller has generically been referred to as the
memory controller in this document. Do not confuse memory controller
used here with the memory controller that is used in hardware.
(For editors)
In this document:
@ -70,6 +70,7 @@ Brief summary of control files.
(See sysctl's vm.swappiness)
memory.move_charge_at_immigrate # set/show controls of moving charges
memory.oom_control # set/show oom controls.
memory.numa_stat # show the number of memory usage per numa node
1. History
@ -181,7 +182,7 @@ behind this approach is that a cgroup that aggressively uses a shared
page will eventually get charged for it (once it is uncharged from
the cgroup that brought it in -- this will happen on memory pressure).
Exception: If CONFIG_CGROUP_CGROUP_MEM_RES_CTLR_SWAP is not used..
Exception: If CONFIG_CGROUP_CGROUP_MEM_RES_CTLR_SWAP is not used.
When you do swapoff and make swapped-out pages of shmem(tmpfs) to
be backed into memory in force, charges for pages are accounted against the
caller of swapoff rather than the users of shmem.
@ -213,7 +214,7 @@ affecting global LRU, memory+swap limit is better than just limiting swap from
OS point of view.
* What happens when a cgroup hits memory.memsw.limit_in_bytes
When a cgroup his memory.memsw.limit_in_bytes, it's useless to do swap-out
When a cgroup hits memory.memsw.limit_in_bytes, it's useless to do swap-out
in this cgroup. Then, swap-out will not be done by cgroup routine and file
caches are dropped. But as mentioned above, global LRU can do swapout memory
from it for sanity of the system's memory management state. You can't forbid
@ -263,16 +264,17 @@ b. Enable CONFIG_RESOURCE_COUNTERS
c. Enable CONFIG_CGROUP_MEM_RES_CTLR
d. Enable CONFIG_CGROUP_MEM_RES_CTLR_SWAP (to use swap extension)
1. Prepare the cgroups
# mkdir -p /cgroups
# mount -t cgroup none /cgroups -o memory
1. Prepare the cgroups (see cgroups.txt, Why are cgroups needed?)
# mount -t tmpfs none /sys/fs/cgroup
# mkdir /sys/fs/cgroup/memory
# mount -t cgroup none /sys/fs/cgroup/memory -o memory
2. Make the new group and move bash into it
# mkdir /cgroups/0
# echo $$ > /cgroups/0/tasks
# mkdir /sys/fs/cgroup/memory/0
# echo $$ > /sys/fs/cgroup/memory/0/tasks
Since now we're in the 0 cgroup, we can alter the memory limit:
# echo 4M > /cgroups/0/memory.limit_in_bytes
# echo 4M > /sys/fs/cgroup/memory/0/memory.limit_in_bytes
NOTE: We can use a suffix (k, K, m, M, g or G) to indicate values in kilo,
mega or gigabytes. (Here, Kilo, Mega, Giga are Kibibytes, Mebibytes, Gibibytes.)
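For example (a sketch continuing the cgroup created above), these two writes are equivalent:
# echo 1G > /sys/fs/cgroup/memory/0/memory.limit_in_bytes
# echo 1073741824 > /sys/fs/cgroup/memory/0/memory.limit_in_bytes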
@ -280,11 +282,11 @@ mega or gigabytes. (Here, Kilo, Mega, Giga are Kibibytes, Mebibytes, Gibibytes.)
NOTE: We can write "-1" to reset the *.limit_in_bytes (unlimited).
NOTE: We cannot set limits on the root cgroup any more.
# cat /cgroups/0/memory.limit_in_bytes
# cat /sys/fs/cgroup/memory/0/memory.limit_in_bytes
4194304
We can check the usage:
# cat /cgroups/0/memory.usage_in_bytes
# cat /sys/fs/cgroup/memory/0/memory.usage_in_bytes
1216512
A successful write to this file does not guarantee a successful set of
@ -464,6 +466,24 @@ value for efficient access. (Of course, when necessary, it's synchronized.)
If you want to know more exact memory usage, you should use RSS+CACHE(+SWAP)
value in memory.stat(see 5.2).
5.6 numa_stat
This is similar to numa_maps but operates on a per-memcg basis. This is
useful for providing visibility into the numa locality information within
a memcg since the pages are allowed to be allocated from any physical
node. One of the use cases is evaluating application performance by
combining this information with the application's cpu allocation.
We export "total", "file", "anon" and "unevictable" pages per-node for
each memcg. The output format of memory.numa_stat is:
total=<total pages> N0=<node 0 pages> N1=<node 1 pages> ...
file=<total file pages> N0=<node 0 pages> N1=<node 1 pages> ...
anon=<total anon pages> N0=<node 0 pages> N1=<node 1 pages> ...
unevictable=<total unevictable pages> N0=<node 0 pages> N1=<node 1 pages> ...
And we have total = file + anon + unevictable.
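A hypothetical reading on a two-node machine could therefore look like (all counts are made-up illustrative values):
# cat /sys/fs/cgroup/memory/0/memory.numa_stat
total=1572 N0=1310 N1=262
file=1048 N0=900 N1=148
anon=524 N0=410 N1=114
unevictable=0 N0=0 N1=0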
6. Hierarchy support
The memory controller supports a deep hierarchy and hierarchical accounting.
@ -471,13 +491,13 @@ The hierarchy is created by creating the appropriate cgroups in the
cgroup filesystem. Consider for example, the following cgroup filesystem
hierarchy
root
root
/ | \
/ | \
a b c
| \
| \
d e
/ | \
a b c
| \
| \
d e
In the diagram above, with hierarchical accounting enabled, all memory
usage of e, is accounted to its ancestors up until the root (i.e, c and root),

View File

@ -481,23 +481,6 @@ Who: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
----------------------------
What: namespace cgroup (ns_cgroup)
When: 2.6.38
Why: The ns_cgroup leads to some problems:
* cgroup creation is out-of-control
* cgroup name can conflict when pids are looping
* it is not possible to have a single process handling
a lot of namespaces without falling in a exponential creation time
* we may want to create a namespace without creating a cgroup
The ns_cgroup is replaced by a compatibility flag 'clone_children',
where a newly created cgroup will copy the parent cgroup values.
The userspace has to manually create a cgroup and add a task to
the 'tasks' file.
Who: Daniel Lezcano <daniel.lezcano@free.fr>
----------------------------
What: iwlwifi disable_hw_scan module parameters
When: 2.6.40
Why: Hardware scan is the preferred method for iwlwifi devices for

View File

@ -843,6 +843,7 @@ Provides counts of softirq handlers serviced since boot time, for each cpu.
TASKLET: 0 0 0 290
SCHED: 27035 26983 26971 26746
HRTIMER: 0 0 0 0
RCU: 1678 1769 2178 2250
1.3 IDE devices in /proc/ide

View File

@ -11,7 +11,9 @@ with the difference that the orphan objects are not freed but only
reported via /sys/kernel/debug/kmemleak. A similar method is used by the
Valgrind tool (memcheck --leak-check) to detect the memory leaks in
user-space applications.
Kmemleak is supported on x86, arm, powerpc, sparc, sh, microblaze and tile.
Please check DEBUG_KMEMLEAK dependencies in lib/Kconfig.debug for supported
architectures.
Usage
-----
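A minimal interaction, assuming debugfs is mounted at /sys/kernel/debug, is to trigger a scan and read back any reported leaks:
# mount -t debugfs nodev /sys/kernel/debug/
# echo scan > /sys/kernel/debug/kmemleak
# cat /sys/kernel/debug/kmemleak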

View File

@ -520,59 +520,20 @@ Support for power domains is provided through the pwr_domain field of struct
device. This field is a pointer to an object of type struct dev_power_domain,
defined in include/linux/pm.h, providing a set of power management callbacks
analogous to the subsystem-level and device driver callbacks that are executed
for the given device during all power transitions, in addition to the respective
subsystem-level callbacks. Specifically, the power domain "suspend" callbacks
(i.e. ->runtime_suspend(), ->suspend(), ->freeze(), ->poweroff(), etc.) are
executed after the analogous subsystem-level callbacks, while the power domain
"resume" callbacks (i.e. ->runtime_resume(), ->resume(), ->thaw(), ->restore,
etc.) are executed before the analogous subsystem-level callbacks. Error codes
returned by the "suspend" and "resume" power domain callbacks are ignored.
for the given device during all power transitions, instead of the respective
subsystem-level callbacks. Specifically, if a device's pm_domain pointer is
not NULL, the ->suspend() callback from the object pointed to by it will be
executed instead of its subsystem's (e.g. bus type's) ->suspend() callback and
analogously for all of the remaining callbacks. In other words, power management
domain callbacks, if defined for the given device, always take precedence over
the callbacks provided by the device's subsystem (e.g. bus type).
Power domain ->runtime_idle() callback is executed before the subsystem-level
->runtime_idle() callback and the result returned by it is not ignored. Namely,
if it returns error code, the subsystem-level ->runtime_idle() callback will not
be called and the helper function rpm_idle() executing it will return error
code. This mechanism is intended to help platforms where saving device state
is a time consuming operation and should only be carried out if all devices
in the power domain are idle, before turning off the shared power resource(s).
Namely, the power domain ->runtime_idle() callback may return error code until
the pm_runtime_idle() helper (or its asynchronous version) has been called for
all devices in the power domain (it is recommended that the returned error code
be -EBUSY in those cases), preventing the subsystem-level ->runtime_idle()
callback from being run prematurely.
The support for device power domains is only relevant to platforms needing to
use the same subsystem-level (e.g. platform bus type) and device driver power
management callbacks in many different power domain configurations and wanting
to avoid incorporating the support for power domains into the subsystem-level
callbacks. The other platforms need not implement it or take it into account
in any way.
System Devices
--------------
System devices (sysdevs) follow a slightly different API, which can be found in
include/linux/sysdev.h
drivers/base/sys.c
System devices will be suspended with interrupts disabled, and after all other
devices have been suspended. On resume, they will be resumed before any other
devices, and also with interrupts disabled. These things occur in special
"sysdev_driver" phases, which affect only system devices.
Thus, after the suspend_noirq (or freeze_noirq or poweroff_noirq) phase, when
the non-boot CPUs are all offline and IRQs are disabled on the remaining online
CPU, then a sysdev_driver.suspend phase is carried out, and the system enters a
sleep state (or a system image is created). During resume (or after the image
has been created or loaded) a sysdev_driver.resume phase is carried out, IRQs
are enabled on the only online CPU, the non-boot CPUs are enabled, and the
resume_noirq (or thaw_noirq or restore_noirq) phase begins.
Code to actually enter and exit the system-wide low power state sometimes
involves hardware details that are only known to the boot firmware, and
may leave a CPU running software (from SRAM or flash memory) that monitors
the system and manages its wakeup sequence.
The support for device power management domains is only relevant to platforms
needing to use the same device driver power management callbacks in many
different power domain configurations and wanting to avoid incorporating the
support for power domains into subsystem-level callbacks, for example by
modifying the platform bus type. Other platforms need not implement it or take
it into account in any way.
Device Low Power (suspend) States

View File

@ -566,11 +566,6 @@ to do this is:
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
The PM core always increments the run-time usage counter before calling the
->prepare() callback and decrements it after calling the ->complete() callback.
Hence disabling run-time PM temporarily like this will not cause any run-time
suspend callbacks to be lost.
7. Generic subsystem callbacks
Subsystems may wish to conserve code space by using the set of generic power

View File

@ -9,7 +9,121 @@ If variable is of Type, use printk format specifier:
size_t %zu or %zx
ssize_t %zd or %zx
Raw pointer value SHOULD be printed with %p.
Raw pointer value SHOULD be printed with %p. The kernel supports
the following extended format specifiers for pointer types:
Symbols/Function Pointers:
%pF versatile_init+0x0/0x110
%pf versatile_init
%pS versatile_init+0x0/0x110
%ps versatile_init
%pB prev_fn_of_versatile_init+0x88/0x88
For printing symbols and function pointers. The 'S' and 's' specifiers
result in the symbol name with ('S') or without ('s') offsets. Where
this is used on a kernel without KALLSYMS - the symbol address is
printed instead.
The 'B' specifier results in the symbol name with offsets and should be
used when printing stack backtraces. The specifier takes into
consideration the effect of compiler optimisations which may occur
when tail-calls are used and marked with the noreturn GCC attribute.
On ia64, ppc64 and parisc64 architectures function pointers are
actually function descriptors which must first be resolved. The 'F' and
'f' specifiers perform this resolution and then provide the same
functionality as the 'S' and 's' specifiers.
Kernel Pointers:
%pK 0x01234567 or 0x0123456789abcdef
For printing kernel pointers which should be hidden from unprivileged
users. The behaviour of %pK depends on the kptr_restrict sysctl - see
Documentation/sysctl/kernel.txt for more details.
Struct Resources:
%pr [mem 0x60000000-0x6fffffff flags 0x2200] or
[mem 0x0000000060000000-0x000000006fffffff flags 0x2200]
%pR [mem 0x60000000-0x6fffffff pref] or
[mem 0x0000000060000000-0x000000006fffffff pref]
For printing struct resources. The 'R' and 'r' specifiers result in a
printed resource with ('R') or without ('r') a decoded flags member.
MAC/FDDI addresses:
%pM 00:01:02:03:04:05
%pMF 00-01-02-03-04-05
%pm 000102030405
For printing 6-byte MAC/FDDI addresses in hex notation. The 'M' and 'm'
specifiers result in a printed address with ('M') or without ('m') byte
separators. The default byte separator is the colon (':').
Where FDDI addresses are concerned the 'F' specifier can be used after
the 'M' specifier to use dash ('-') separators instead of the default
separator.
IPv4 addresses:
%pI4 1.2.3.4
%pi4 001.002.003.004
%p[Ii][hnbl]
For printing IPv4 dot-separated decimal addresses. The 'I4' and 'i4'
specifiers result in a printed address with ('i4') or without ('I4')
leading zeros.
The additional 'h', 'n', 'b', and 'l' specifiers are used to specify
host, network, big or little endian order addresses respectively. Where
no specifier is provided the default network/big endian order is used.
IPv6 addresses:
%pI6 0001:0002:0003:0004:0005:0006:0007:0008
%pi6 00010002000300040005000600070008
%pI6c 1:2:3:4:5:6:7:8
For printing IPv6 network-order 16-bit hex addresses. The 'I6' and 'i6'
specifiers result in a printed address with ('I6') or without ('i6')
colon-separators. Leading zeros are always used.
The additional 'c' specifier can be used with the 'I' specifier to
print a compressed IPv6 address as described by
http://tools.ietf.org/html/rfc5952
UUID/GUID addresses:
%pUb 00010203-0405-0607-0809-0a0b0c0d0e0f
%pUB 00010203-0405-0607-0809-0A0B0C0D0E0F
%pUl 03020100-0504-0706-0809-0a0b0c0e0e0f
%pUL 03020100-0504-0706-0809-0A0B0C0E0E0F
For printing 16-byte UUID/GUID addresses. The additional 'l', 'L',
'b' and 'B' specifiers are used to specify a little endian order in
lower ('l') or upper case ('L') hex characters - and big endian order
in lower ('b') or upper case ('B') hex characters.
Where no additional specifiers are used the default little endian
order with lower case hex characters will be printed.
struct va_format:
%pV
For printing struct va_format structures. These contain a format string
and va_list as follows:
struct va_format {
const char *fmt;
va_list *va;
};
Do not use this feature without some mechanism to verify the
correctness of the format string and va_list arguments.
u64 SHOULD be printed with %llu/%llx, (unsigned long long):
@ -32,4 +146,5 @@ Reminder: sizeof() result is of type size_t.
Thank you for your cooperation and attention.
By Randy Dunlap <rdunlap@xenotime.net>
By Randy Dunlap <rdunlap@xenotime.net> and
Andrew Murray <amurray@mpc-data.co.uk>

View File

@ -223,9 +223,10 @@ When CONFIG_FAIR_GROUP_SCHED is defined, a "cpu.shares" file is created for each
group created using the pseudo filesystem. See example steps below to create
task groups and modify their CPU share using the "cgroups" pseudo filesystem.
# mkdir /dev/cpuctl
# mount -t cgroup -ocpu none /dev/cpuctl
# cd /dev/cpuctl
# mount -t tmpfs cgroup_root /sys/fs/cgroup
# mkdir /sys/fs/cgroup/cpu
# mount -t cgroup -ocpu none /sys/fs/cgroup/cpu
# cd /sys/fs/cgroup/cpu
# mkdir multimedia # create "multimedia" group of tasks
# mkdir browser # create "browser" group of tasks
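Relative CPU time can then be assigned to the groups via their cpu.shares files, e.g. giving "multimedia" twice the weight of "browser" (a sketch; 1024 is the conventional default share value):
# echo 2048 > multimedia/cpu.shares
# echo 1024 > browser/cpu.shares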

View File

@ -129,9 +129,8 @@ priority!
Enabling CONFIG_RT_GROUP_SCHED lets you explicitly allocate real
CPU bandwidth to task groups.
This uses the /cgroup virtual file system and
"/cgroup/<cgroup>/cpu.rt_runtime_us" to control the CPU time reserved for each
control group.
This uses the cgroup virtual file system and "<cgroup>/cpu.rt_runtime_us"
to control the CPU time reserved for each control group.
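For example (a sketch, assuming the cpu controller is mounted at /sys/fs/cgroup/cpu and a group "mygroup" exists), reserving 100000 us of real-time runtime for that group would be:
# echo 100000 > /sys/fs/cgroup/cpu/mygroup/cpu.rt_runtime_us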
For more information on working with control groups, you should read
Documentation/cgroups/cgroups.txt as well.
@ -150,7 +149,7 @@ For now, this can be simplified to just the following (but see Future plans):
===============
There is work in progress to make the scheduling period for each group
("/cgroup/<cgroup>/cpu.rt_period_us") configurable as well.
("<cgroup>/cpu.rt_period_us") configurable as well.
The constraint on the period is that a subgroup must have a smaller or
equal period to its parent. But realistically it's not very useful _yet_

View File

@ -129,12 +129,12 @@ Limit injection to pages owned by memgroup. Specified by inode number
of the memcg.
Example:
mkdir /cgroup/hwpoison
mkdir /sys/fs/cgroup/mem/hwpoison
usemem -m 100 -s 1000 &
echo `jobs -p` > /cgroup/hwpoison/tasks
echo `jobs -p` > /sys/fs/cgroup/mem/hwpoison/tasks
memcg_ino=$(ls -id /cgroup/hwpoison | cut -f1 -d' ')
memcg_ino=$(ls -id /sys/fs/cgroup/mem/hwpoison | cut -f1 -d' ')
echo $memcg_ino > /debug/hwpoison/corrupt-filter-memcg
page-types -p `pidof init` --hwpoison # shall do nothing

View File

@ -2291,8 +2291,7 @@ F: drivers/scsi/eata_pio.*
EBTABLES
M: Bart De Schuymer <bart.de.schuymer@pandora.be>
L: ebtables-user@lists.sourceforge.net
L: ebtables-devel@lists.sourceforge.net
L: netfilter-devel@vger.kernel.org
W: http://ebtables.sourceforge.net/
S: Maintained
F: include/linux/netfilter_bridge/ebt_*.h
@ -3819,6 +3818,12 @@ S: Maintained
F: drivers/leds/
F: include/linux/leds.h
LEGACY EEPROM DRIVER
M: Jean Delvare <khali@linux-fr.org>
S: Maintained
F: Documentation/misc-devices/eeprom
F: drivers/misc/eeprom/eeprom.c
LEGO USB Tower driver
M: Juergen Stuber <starblue@users.sourceforge.net>
L: legousb-devel@lists.sourceforge.net
@ -4144,7 +4149,7 @@ F: include/linux/mm.h
F: mm/
MEMORY RESOURCE CONTROLLER
M: Balbir Singh <balbir@linux.vnet.ibm.com>
M: Balbir Singh <bsingharora@gmail.com>
M: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
M: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
L: linux-mm@kvack.org
@ -4889,7 +4894,7 @@ F: mm/percpu*.c
F: arch/*/include/asm/percpu.h
PER-TASK DELAY ACCOUNTING
M: Balbir Singh <balbir@linux.vnet.ibm.com>
M: Balbir Singh <bsingharora@gmail.com>
S: Maintained
F: include/linux/delayacct.h
F: kernel/delayacct.c
@ -6097,7 +6102,7 @@ F: include/target/
F: Documentation/target/
TASKSTATS STATISTICS INTERFACE
M: Balbir Singh <balbir@linux.vnet.ibm.com>
M: Balbir Singh <bsingharora@gmail.com>
S: Maintained
F: Documentation/accounting/taskstats*
F: include/linux/taskstats*
@ -6457,7 +6462,7 @@ M: Jiri Kosina <jkosina@suse.cz>
L: linux-usb@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
S: Maintained
F: Documentation/usb/hiddev.txt
F: Documentation/hid/hiddev.txt
F: drivers/hid/usbhid/
USB ISP116X DRIVER
@ -6716,6 +6721,14 @@ S: Maintained
F: Documentation/filesystems/vfat.txt
F: fs/fat/
VIDEOBUF2 FRAMEWORK
M: Pawel Osciak <pawel@osciak.com>
M: Marek Szyprowski <m.szyprowski@samsung.com>
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/video/videobuf2-*
F: include/media/videobuf2-*
VIRTIO CONSOLE DRIVER
M: Amit Shah <amit.shah@redhat.com>
L: virtualization@lists.linux-foundation.org
@ -6993,6 +7006,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.
S: Maintained
F: drivers/platform/x86
X86 MCE INFRASTRUCTURE
M: Tony Luck <tony.luck@intel.com>
M: Borislav Petkov <bp@amd64.org>
L: linux-edac@vger.kernel.org
S: Maintained
F: arch/x86/kernel/cpu/mcheck/*
XEN HYPERVISOR INTERFACE
M: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

View File

@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 0
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Sneaky Weasel
# *DOCUMENTATION*
@ -1526,7 +1526,8 @@ quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN $(wildcard $(rm-files))
# Run depmod only if we have System.map and depmod is executable
quiet_cmd_depmod = DEPMOD $(KERNELRELEASE)
cmd_depmod = $(srctree)/scripts/depmod.sh $(DEPMOD) $(KERNELRELEASE)
cmd_depmod = $(CONFIG_SHELL) $(srctree)/scripts/depmod.sh $(DEPMOD) \
$(KERNELRELEASE)
# Create temporary dir for module support files
# clean it up only when building all modules

View File

@ -409,7 +409,7 @@ SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
return -EFAULT;
len = namelen;
if (namelen > 32)
if (len > 32)
len = 32;
down_read(&uts_sem);
@ -594,7 +594,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
down_read(&uts_sem);
res = sysinfo_table[offset];
len = strlen(res)+1;
if (len > count)
if ((unsigned long)len > (unsigned long)count)
len = count;
if (copy_to_user(buf, res, len))
err = -EFAULT;
@ -649,7 +649,7 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
return 1;
case GSI_GET_HWRPB:
if (nbytes < sizeof(*hwrpb))
if (nbytes > sizeof(*hwrpb))
return -EINVAL;
if (copy_to_user(buffer, hwrpb, nbytes) != 0)
return -EFAULT;
@ -1008,6 +1008,7 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
{
struct rusage r;
long ret, err;
unsigned int status = 0;
mm_segment_t old_fs;
if (!ur)
@ -1016,13 +1017,15 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
old_fs = get_fs();
set_fs (KERNEL_DS);
ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r);
ret = sys_wait4(pid, (unsigned int __user *) &status, options,
(struct rusage __user *) &r);
set_fs (old_fs);
if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
return -EFAULT;
err = 0;
err |= put_user(status, ustatus);
err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);

View File

@ -691,9 +691,9 @@ proc_types:
.word 0x41069260 @ ARM926EJ-S (v5TEJ)
.word 0xff0ffff0
b __arm926ejs_mmu_cache_on
b __armv4_mmu_cache_off
b __armv5tej_mmu_cache_flush
W(b) __arm926ejs_mmu_cache_on
W(b) __armv4_mmu_cache_off
W(b) __armv5tej_mmu_cache_flush
.word 0x00007000 @ ARM7 IDs
.word 0x0000f000

View File

@ -157,7 +157,7 @@ CONFIG_LEDS_GPIO=m
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=m
CONFIG_LEDS_TRIGGER_HEARTBEAT=m
CONFIG_RTC_CLASS=m
CONFIG_RTC_CLASS=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_XFS_FS=m

View File

@ -60,7 +60,7 @@ CONFIG_FB_ARMCLCD=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_RTC_CLASS=m
CONFIG_RTC_CLASS=y
CONFIG_INOTIFY=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y

View File

@ -142,7 +142,7 @@ CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
CONFIG_USB_G_SERIAL=m
CONFIG_USB_G_PRINTER=m
CONFIG_RTC_CLASS=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=m
CONFIG_RTC_DRV_SA1100=m
CONFIG_EXT2_FS=m

View File

@ -73,7 +73,7 @@ CONFIG_SENSORS_MAX6650=m
# CONFIG_VGA_CONSOLE is not set
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_SA1100=m
CONFIG_DMADEVICES=y
# CONFIG_DNOTIFY is not set

View File

@ -158,7 +158,7 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=m
CONFIG_LEDS_TRIGGER_BACKLIGHT=m
CONFIG_LEDS_TRIGGER_GPIO=m
CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
CONFIG_RTC_CLASS=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_ISL1208=m
CONFIG_RTC_DRV_PXA=m
CONFIG_EXT2_FS=y

View File

@ -76,6 +76,9 @@ struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
unsigned long dt_root;
const char *model;
if (!dt_phys)
return NULL;
devtree = phys_to_virt(dt_phys);
/* check device tree validity */

View File

@ -435,6 +435,10 @@ __irq_usr:
usr_entry
kuser_cmpxchg_check
#ifdef CONFIG_IRQSOFF_TRACER
bl trace_hardirqs_off
#endif
get_thread_info tsk
#ifdef CONFIG_PREEMPT
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
@ -453,7 +457,7 @@ __irq_usr:
#endif
mov why, #0
b ret_to_user
b ret_to_user_from_irq
UNWIND(.fnend )
ENDPROC(__irq_usr)

View File

@ -64,6 +64,7 @@ work_resched:
ENTRY(ret_to_user)
ret_slow_syscall:
disable_irq @ disable interrupts
ENTRY(ret_to_user_from_irq)
ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK
bne work_pending
@ -75,6 +76,7 @@ no_work_pending:
arch_ret_to_user r1, lr
restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)
/*

View File

@ -139,7 +139,7 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
fs = get_fs();
set_fs(KERNEL_DS);
for (i = -4; i < 1; i++) {
for (i = -4; i < 1 + !!thumb; i++) {
unsigned int val, bad;
if (thumb)
@ -563,7 +563,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
if (!pmd_present(*pmd))
goto bad_access;
pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
if (!pte_present(*pte) || !pte_dirty(*pte)) {
if (!pte_present(*pte) || !pte_write(*pte) || !pte_dirty(*pte)) {
pte_unmap_unlock(pte, ptl);
goto bad_access;
}

View File

@ -494,7 +494,7 @@ static struct platform_device da850_mcasp_device = {
.resource = da850_mcasp_resources,
};
struct platform_device davinci_pcm_device = {
static struct platform_device davinci_pcm_device = {
.name = "davinci-pcm-audio",
.id = -1,
};

View File

@ -298,7 +298,7 @@ static void davinci_init_wdt(void)
/*-------------------------------------------------------------------------*/
struct platform_device davinci_pcm_device = {
static struct platform_device davinci_pcm_device = {
.name = "davinci-pcm-audio",
.id = -1,
};

View File

@ -252,9 +252,11 @@ static struct irq_chip gpio_irqchip = {
static void
gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
struct davinci_gpio_regs __iomem *g = irq2regs(irq);
struct davinci_gpio_regs __iomem *g;
u32 mask = 0xffff;
g = (__force struct davinci_gpio_regs __iomem *) irq_desc_get_handler_data(desc);
/* we only care about one bank */
if (irq & 1)
mask <<= 16;
@ -422,8 +424,7 @@ static int __init davinci_gpio_irq_setup(void)
/* set up all irqs in this bank */
irq_set_chained_handler(bank_irq, gpio_irq_handler);
irq_set_chip_data(bank_irq, (__force void *)g);
irq_set_handler_data(bank_irq, (void *)irq);
irq_set_handler_data(bank_irq, (__force void *)g);
for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) {
irq_set_chip(irq, &gpio_irqchip);

View File

@ -103,6 +103,7 @@ static void __init footbridge_timer_init(void)
clockevents_calc_mult_shift(ce, mem_fclk_21285, 5);
ce->max_delta_ns = clockevent_delta2ns(0xffffff, ce);
ce->min_delta_ns = clockevent_delta2ns(0x000004, ce);
ce->cpumask = cpumask_of(smp_processor_id());
clockevents_register_device(ce);
}

View File

@ -26,6 +26,7 @@
#include <asm/hardware/debug-8250.S>
#else
#include <mach/hardware.h>
/* For EBSA285 debugging */
.equ dc21285_high, ARMCSR_BASE & 0xff000000
.equ dc21285_low, ARMCSR_BASE & 0x00ffffff
@ -36,8 +37,8 @@
.else
mov \rp, #0
.endif
orr \rv, \rp, #0x42000000
orr \rp, \rp, #dc21285_high
orr \rv, \rp, #dc21285_high
orr \rp, \rp, #0x42000000
.endm
.macro senduart,rd,rx

View File

@ -23,6 +23,8 @@
#include <linux/io.h>
#include <asm/mach/time.h>
#include <asm/hardware/gic.h>
#include <mach/msm_iomap.h>
#include <mach/cpu.h>
@ -55,10 +57,12 @@ enum timer_location {
#if defined(CONFIG_ARCH_QSD8X50)
#define DGT_HZ (19200000 / 4) /* 19.2 MHz / 4 by default */
#define MSM_DGT_SHIFT (0)
#elif defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) || \
defined(CONFIG_ARCH_MSM8960)
#elif defined(CONFIG_ARCH_MSM7X30)
#define DGT_HZ (24576000 / 4) /* 24.576 MHz (LPXO) / 4 by default */
#define MSM_DGT_SHIFT (0)
#elif defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960)
#define DGT_HZ (27000000 / 4) /* 27 MHz (PXO) / 4 by default */
#define MSM_DGT_SHIFT (0)
#else
#define DGT_HZ 19200000 /* 19.2 MHz or 600 KHz after shift */
#define MSM_DGT_SHIFT (5)
@ -100,7 +104,11 @@ static cycle_t msm_read_timer_count(struct clocksource *cs)
{
struct msm_clock *clk = container_of(cs, struct msm_clock, clocksource);
return readl(clk->global_counter);
/*
* Shift timer count down by a constant due to unreliable lower bits
* on some targets.
*/
return readl(clk->global_counter) >> clk->shift;
}
static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt)

View File

@ -16,6 +16,8 @@
#include <linux/err.h>
#include <linux/mutex.h>
#include <asm/processor.h> /* for cpu_relax() */
#include <mach/mxs.h>
#define OCOTP_WORD_OFFSET 0x20

View File

@ -4,14 +4,14 @@
# Common support
obj-y := io.o id.o sram.o time.o irq.o mux.o flash.o serial.o devices.o dma.o
obj-y += clock.o clock_data.o opp_data.o reset.o
obj-y += clock.o clock_data.o opp_data.o reset.o pm_bus.o
obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
obj-$(CONFIG_OMAP_32K_TIMER) += timer32k.o
# Power Management
obj-$(CONFIG_PM) += pm.o sleep.o pm_bus.o
obj-$(CONFIG_PM) += pm.o sleep.o
# DSP
obj-$(CONFIG_OMAP_MBOX_FWK) += mailbox_mach.o

View File

@ -56,9 +56,13 @@ static struct dev_power_domain default_power_domain = {
USE_PLATFORM_PM_SLEEP_OPS
},
};
#define OMAP1_PWR_DOMAIN (&default_power_domain)
#else
#define OMAP1_PWR_DOMAIN NULL
#endif /* CONFIG_PM_RUNTIME */
static struct pm_clk_notifier_block platform_bus_notifier = {
.pwr_domain = &default_power_domain,
.pwr_domain = OMAP1_PWR_DOMAIN,
.con_ids = { "ick", "fck", NULL, },
};
@ -72,4 +76,4 @@ static int __init omap1_pm_runtime_init(void)
return 0;
}
core_initcall(omap1_pm_runtime_init);
#endif /* CONFIG_PM_RUNTIME */

View File

@ -84,7 +84,8 @@ static struct mtd_partition omap3pandora_nand_partitions[] = {
static struct omap_nand_platform_data pandora_nand_data = {
.cs = 0,
.devsize = 1, /* '0' for 8-bit, '1' for 16-bit device */
.devsize = NAND_BUSWIDTH_16,
.xfer_type = NAND_OMAP_PREFETCH_DMA,
.parts = omap3pandora_nand_partitions,
.nr_parts = ARRAY_SIZE(omap3pandora_nand_partitions),
};

View File

@ -189,7 +189,7 @@ static struct dentry *pm_dbg_dir;
static int pm_dbg_init_done;
static int __init pm_dbg_init(void);
static int pm_dbg_init(void);
enum {
DEBUG_FILE_COUNTERS = 0,
@ -595,7 +595,7 @@ static int option_set(void *data, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(pm_dbg_option_fops, option_get, option_set, "%llu\n");
static int __init pm_dbg_init(void)
static int pm_dbg_init(void)
{
int i;
struct dentry *d;

View File

@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/apm-emulation.h>

View File

@ -382,10 +382,8 @@ void ag5evm_sdhi1_set_pwr(struct platform_device *pdev, int state)
}
static struct sh_mobile_sdhi_info sh_sdhi1_platdata = {
.dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
.dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
.tmio_flags = TMIO_MMC_WRPROTECT_DISABLE,
.tmio_caps = MMC_CAP_NONREMOVABLE,
.tmio_caps = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ,
.tmio_ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
.set_pwr = ag5evm_sdhi1_set_pwr,
};

View File

@ -126,7 +126,7 @@
* ------+--------------------+--------------------+-------
* IRQ0 | ICR1A.IRQ0SA=0010 | SDHI2 card detect | Low
* IRQ6 | ICR1A.IRQ6SA=0011 | Ether(LAN9220) | High
* IRQ7 | ICR1A.IRQ7SA=0010 | LCD Tuch Panel | Low
* IRQ7 | ICR1A.IRQ7SA=0010 | LCD Touch Panel | Low
* IRQ8 | ICR2A.IRQ8SA=0010 | MMC/SD card detect | Low
* IRQ9 | ICR2A.IRQ9SA=0010 | KEY(TCA6408) | Low
* IRQ21 | ICR4A.IRQ21SA=0011 | Sensor(ADXL345) | High
@ -165,10 +165,10 @@
* USB1 can become Host by r8a66597, and become Function by renesas_usbhs.
* But don't select both drivers in same time.
* These uses same IRQ number for request_irq(), and aren't supporting
* IRQF_SHARD / IORESOURCE_IRQ_SHAREABLE.
* IRQF_SHARED / IORESOURCE_IRQ_SHAREABLE.
*
* Actually these are old/new version of USB driver.
* This mean its register will be broken if it supports SHARD IRQ,
* This mean its register will be broken if it supports shared IRQ,
*/
/*
@ -562,7 +562,121 @@ out:
clk_put(hdmi_ick);
}
/* USB1 (Host) */
/* USBHS0 is connected to CN22 which takes a USB Mini-B plug
*
* The sh7372 SoC has IRQ7 set aside for USBHS0 hotplug,
* but on this particular board IRQ7 is already used by
* the touch screen. This leaves us with software polling.
*/
#define USBHS0_POLL_INTERVAL (HZ * 5)
struct usbhs_private {
unsigned int usbphyaddr;
unsigned int usbcrcaddr;
struct renesas_usbhs_platform_info info;
struct delayed_work work;
struct platform_device *pdev;
};
#define usbhs_get_priv(pdev) \
container_of(renesas_usbhs_get_info(pdev), \
struct usbhs_private, info)
#define usbhs_is_connected(priv) \
(!((1 << 7) & __raw_readw(priv->usbcrcaddr)))
static int usbhs_get_vbus(struct platform_device *pdev)
{
return usbhs_is_connected(usbhs_get_priv(pdev));
}
static void usbhs_phy_reset(struct platform_device *pdev)
{
struct usbhs_private *priv = usbhs_get_priv(pdev);
/* init phy */
__raw_writew(0x8a0a, priv->usbcrcaddr);
}
static int usbhs0_get_id(struct platform_device *pdev)
{
return USBHS_GADGET;
}
static void usbhs0_work_function(struct work_struct *work)
{
struct usbhs_private *priv = container_of(work, struct usbhs_private,
work.work);
renesas_usbhs_call_notify_hotplug(priv->pdev);
schedule_delayed_work(&priv->work, USBHS0_POLL_INTERVAL);
}
static int usbhs0_hardware_init(struct platform_device *pdev)
{
struct usbhs_private *priv = usbhs_get_priv(pdev);
priv->pdev = pdev;
INIT_DELAYED_WORK(&priv->work, usbhs0_work_function);
schedule_delayed_work(&priv->work, USBHS0_POLL_INTERVAL);
return 0;
}
static void usbhs0_hardware_exit(struct platform_device *pdev)
{
struct usbhs_private *priv = usbhs_get_priv(pdev);
cancel_delayed_work_sync(&priv->work);
}
static struct usbhs_private usbhs0_private = {
.usbcrcaddr = 0xe605810c, /* USBCR2 */
.info = {
.platform_callback = {
.hardware_init = usbhs0_hardware_init,
.hardware_exit = usbhs0_hardware_exit,
.phy_reset = usbhs_phy_reset,
.get_id = usbhs0_get_id,
.get_vbus = usbhs_get_vbus,
},
.driver_param = {
.buswait_bwait = 4,
},
},
};
static struct resource usbhs0_resources[] = {
[0] = {
.name = "USBHS0",
.start = 0xe6890000,
.end = 0xe68900e6 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0x1ca0) /* USB0_USB0I0 */,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device usbhs0_device = {
.name = "renesas_usbhs",
.id = 0,
.dev = {
.platform_data = &usbhs0_private.info,
},
.num_resources = ARRAY_SIZE(usbhs0_resources),
.resource = usbhs0_resources,
};
/* USBHS1 is connected to CN31 which takes a USB Mini-AB plug
*
* Use J30 to select between Host and Function. This setting
* can however not be detected by software. Hotplug of USBHS1
* is provided via IRQ8.
*/
#define IRQ8 evt2irq(0x0300)
/* USBHS1 USB Host support via r8a66597_hcd */
static void usb1_host_port_power(int port, int power)
{
if (!power) /* only power-on is supported for now */
@ -579,9 +693,9 @@ static struct r8a66597_platdata usb1_host_data = {
static struct resource usb1_host_resources[] = {
[0] = {
.name = "USBHS",
.start = 0xE68B0000,
.end = 0xE68B00E6 - 1,
.name = "USBHS1",
.start = 0xe68b0000,
.end = 0xe68b00e6 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@ -602,37 +716,14 @@ static struct platform_device usb1_host_device = {
.resource = usb1_host_resources,
};
/* USB1 (Function) */
/* USBHS1 USB Function support via renesas_usbhs */
#define USB_PHY_MODE (1 << 4)
#define USB_PHY_INT_EN ((1 << 3) | (1 << 2))
#define USB_PHY_ON (1 << 1)
#define USB_PHY_OFF (1 << 0)
#define USB_PHY_INT_CLR (USB_PHY_ON | USB_PHY_OFF)
struct usbhs_private {
unsigned int irq;
unsigned int usbphyaddr;
unsigned int usbcrcaddr;
struct renesas_usbhs_platform_info info;
};
#define usbhs_get_priv(pdev) \
container_of(renesas_usbhs_get_info(pdev), \
struct usbhs_private, info)
#define usbhs_is_connected(priv) \
(!((1 << 7) & __raw_readw(priv->usbcrcaddr)))
static int usbhs1_get_id(struct platform_device *pdev)
{
return USBHS_GADGET;
}
static int usbhs1_get_vbus(struct platform_device *pdev)
{
return usbhs_is_connected(usbhs_get_priv(pdev));
}
static irqreturn_t usbhs1_interrupt(int irq, void *data)
{
struct platform_device *pdev = data;
@ -654,12 +745,10 @@ static int usbhs1_hardware_init(struct platform_device *pdev)
struct usbhs_private *priv = usbhs_get_priv(pdev);
int ret;
irq_set_irq_type(priv->irq, IRQ_TYPE_LEVEL_HIGH);
/* clear interrupt status */
__raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr);
ret = request_irq(priv->irq, usbhs1_interrupt, 0,
ret = request_irq(IRQ8, usbhs1_interrupt, IRQF_TRIGGER_HIGH,
dev_name(&pdev->dev), pdev);
if (ret) {
dev_err(&pdev->dev, "request_irq err\n");
@ -679,15 +768,12 @@ static void usbhs1_hardware_exit(struct platform_device *pdev)
/* clear interrupt status */
__raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr);
free_irq(priv->irq, pdev);
free_irq(IRQ8, pdev);
}
static void usbhs1_phy_reset(struct platform_device *pdev)
static int usbhs1_get_id(struct platform_device *pdev)
{
struct usbhs_private *priv = usbhs_get_priv(pdev);
/* init phy */
__raw_writew(0x8a0a, priv->usbcrcaddr);
return USBHS_GADGET;
}
static u32 usbhs1_pipe_cfg[] = {
@ -710,16 +796,15 @@ static u32 usbhs1_pipe_cfg[] = {
};
static struct usbhs_private usbhs1_private = {
.irq = evt2irq(0x0300), /* IRQ8 */
.usbphyaddr = 0xE60581E2, /* USBPHY1INTAP */
.usbcrcaddr = 0xE6058130, /* USBCR4 */
.usbphyaddr = 0xe60581e2, /* USBPHY1INTAP */
.usbcrcaddr = 0xe6058130, /* USBCR4 */
.info = {
.platform_callback = {
.hardware_init = usbhs1_hardware_init,
.hardware_exit = usbhs1_hardware_exit,
.phy_reset = usbhs1_phy_reset,
.get_id = usbhs1_get_id,
.get_vbus = usbhs1_get_vbus,
.phy_reset = usbhs_phy_reset,
.get_vbus = usbhs_get_vbus,
},
.driver_param = {
.buswait_bwait = 4,
@ -731,9 +816,9 @@ static struct usbhs_private usbhs1_private = {
static struct resource usbhs1_resources[] = {
[0] = {
.name = "USBHS",
.start = 0xE68B0000,
.end = 0xE68B00E6 - 1,
.name = "USBHS1",
.start = 0xe68b0000,
.end = 0xe68b00e6 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@ -752,7 +837,6 @@ static struct platform_device usbhs1_device = {
.resource = usbhs1_resources,
};
/* LED */
static struct gpio_led mackerel_leds[] = {
{
@ -1203,6 +1287,7 @@ static struct platform_device *mackerel_devices[] __initdata = {
&nor_flash_device,
&smc911x_device,
&lcdc_device,
&usbhs0_device,
&usb1_host_device,
&usbhs1_device,
&leds_device,
@ -1301,6 +1386,7 @@ static void __init mackerel_map_io(void)
#define GPIO_PORT9CR 0xE6051009
#define GPIO_PORT10CR 0xE605100A
#define GPIO_PORT167CR 0xE60520A7
#define GPIO_PORT168CR 0xE60520A8
#define SRCR4 0xe61580bc
#define USCCR1 0xE6058144
@ -1354,17 +1440,17 @@ static void __init mackerel_init(void)
gpio_request(GPIO_PORT151, NULL); /* LCDDON */
gpio_direction_output(GPIO_PORT151, 1);
/* USB enable */
gpio_request(GPIO_FN_VBUS0_1, NULL);
gpio_request(GPIO_FN_IDIN_1_18, NULL);
gpio_request(GPIO_FN_PWEN_1_115, NULL);
gpio_request(GPIO_FN_OVCN_1_114, NULL);
gpio_request(GPIO_FN_EXTLP_1, NULL);
gpio_request(GPIO_FN_OVCN2_1, NULL);
gpio_pull_down(GPIO_PORT168CR);
/* USBHS0 */
gpio_request(GPIO_FN_VBUS0_0, NULL);
gpio_pull_down(GPIO_PORT168CR); /* VBUS0_0 pull down */
/* setup USB phy */
__raw_writew(0x8a0a, 0xE6058130); /* USBCR4 */
/* USBHS1 */
gpio_request(GPIO_FN_VBUS0_1, NULL);
gpio_pull_down(GPIO_PORT167CR); /* VBUS0_1 pull down */
gpio_request(GPIO_FN_IDIN_1_113, NULL);
/* USB phy tweak to make the r8a66597_hcd host driver work */
__raw_writew(0x8a0a, 0xe6058130); /* USBCR4 */
/* enable FSI2 port A (ak4643) */
gpio_request(GPIO_FN_FSIAIBT, NULL);

View File

@ -250,6 +250,11 @@ static irqreturn_t sh73a0_intcs_demux(int irq, void *dev_id)
return IRQ_HANDLED;
}
static int sh73a0_set_wake(struct irq_data *data, unsigned int on)
{
return 0; /* always allow wakeup */
}
void __init sh73a0_init_irq(void)
{
void __iomem *gic_dist_base = __io(0xf0001000);
@ -257,6 +262,7 @@ void __init sh73a0_init_irq(void)
void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE);
gic_init(0, 29, gic_dist_base, gic_cpu_base);
gic_arch_extn.irq_set_wake = sh73a0_set_wake;
register_intc_controller(&intcs_desc);

View File

@ -38,7 +38,7 @@ static struct plat_sci_port scif0_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE,
.scbrr_algo_id = SCBRR_ALGO_4,
.type = PORT_SCIF,
.type = PORT_SCIFA,
.irqs = { evt2irq(0xc00), evt2irq(0xc00),
evt2irq(0xc00), evt2irq(0xc00) },
};
@ -57,7 +57,7 @@ static struct plat_sci_port scif1_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE,
.scbrr_algo_id = SCBRR_ALGO_4,
.type = PORT_SCIF,
.type = PORT_SCIFA,
.irqs = { evt2irq(0xc20), evt2irq(0xc20),
evt2irq(0xc20), evt2irq(0xc20) },
};
@ -76,7 +76,7 @@ static struct plat_sci_port scif2_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE,
.scbrr_algo_id = SCBRR_ALGO_4,
.type = PORT_SCIF,
.type = PORT_SCIFA,
.irqs = { evt2irq(0xc40), evt2irq(0xc40),
evt2irq(0xc40), evt2irq(0xc40) },
};
@ -95,7 +95,7 @@ static struct plat_sci_port scif3_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE,
.scbrr_algo_id = SCBRR_ALGO_4,
.type = PORT_SCIF,
.type = PORT_SCIFA,
.irqs = { evt2irq(0xc60), evt2irq(0xc60),
evt2irq(0xc60), evt2irq(0xc60) },
};
@ -114,7 +114,7 @@ static struct plat_sci_port scif4_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE,
.scbrr_algo_id = SCBRR_ALGO_4,
.type = PORT_SCIF,
.type = PORT_SCIFA,
.irqs = { evt2irq(0xd20), evt2irq(0xd20),
evt2irq(0xd20), evt2irq(0xd20) },
};
@ -133,7 +133,7 @@ static struct plat_sci_port scif5_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE,
.scbrr_algo_id = SCBRR_ALGO_4,
.type = PORT_SCIF,
.type = PORT_SCIFA,
.irqs = { evt2irq(0xd40), evt2irq(0xd40),
evt2irq(0xd40), evt2irq(0xd40) },
};
@ -152,7 +152,7 @@ static struct plat_sci_port scif6_platform_data = {
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE,
.scbrr_algo_id = SCBRR_ALGO_4,
.type = PORT_SCIF,
.type = PORT_SCIFB,
.irqs = { evt2irq(0xd60), evt2irq(0xd60),
evt2irq(0xd60), evt2irq(0xd60) },
};

View File

@ -31,7 +31,7 @@ struct clk {
bool reset;
__u16 clk_val;
__s8 usecount;
__u32 res_reg;
void __iomem * res_reg;
__u16 res_mask;
bool hw_ctrld;

View File

@ -18,6 +18,12 @@
* the defines are used for setting up the I/O memory mapping.
*/
#ifdef __ASSEMBLER__
#define IOMEM(a) (a)
#else
#define IOMEM(a) (void __iomem *) a
#endif
/* NAND Flash CS0 */
#define U300_NAND_CS0_PHYS_BASE 0x80000000
@ -47,13 +53,6 @@
#define U300_SEMI_CONFIG_BASE 0x30000000
#endif
/*
* All the following peripherals are specified at their PHYSICAL address,
* so if you need to access them (in the kernel), you MUST use the macros
* defined in <asm/io.h> to map to the IO_ADDRESS_AHB() IO_ADDRESS_FAST()
* etc.
*/
/*
* AHB peripherals
*/
@ -63,11 +62,11 @@
/* Vectored Interrupt Controller 0, servicing 32 interrupts */
#define U300_INTCON0_BASE (U300_AHB_PER_PHYS_BASE+0x1000)
#define U300_INTCON0_VBASE (U300_AHB_PER_VIRT_BASE+0x1000)
#define U300_INTCON0_VBASE IOMEM(U300_AHB_PER_VIRT_BASE+0x1000)
/* Vectored Interrupt Controller 1, servicing 32 interrupts */
#define U300_INTCON1_BASE (U300_AHB_PER_PHYS_BASE+0x2000)
#define U300_INTCON1_VBASE (U300_AHB_PER_VIRT_BASE+0x2000)
#define U300_INTCON1_VBASE IOMEM(U300_AHB_PER_VIRT_BASE+0x2000)
/* Memory Stick Pro (MSPRO) controller */
#define U300_MSPRO_BASE (U300_AHB_PER_PHYS_BASE+0x3000)
@ -115,7 +114,7 @@
/* SYSCON */
#define U300_SYSCON_BASE (U300_SLOW_PER_PHYS_BASE+0x1000)
#define U300_SYSCON_VBASE (U300_SLOW_PER_VIRT_BASE+0x1000)
#define U300_SYSCON_VBASE IOMEM(U300_SLOW_PER_VIRT_BASE+0x1000)
/* Watchdog */
#define U300_WDOG_BASE (U300_SLOW_PER_PHYS_BASE+0x2000)
@ -125,7 +124,7 @@
/* APP side special timer */
#define U300_TIMER_APP_BASE (U300_SLOW_PER_PHYS_BASE+0x4000)
#define U300_TIMER_APP_VBASE (U300_SLOW_PER_VIRT_BASE+0x4000)
#define U300_TIMER_APP_VBASE IOMEM(U300_SLOW_PER_VIRT_BASE+0x4000)
/* Keypad */
#define U300_KEYPAD_BASE (U300_SLOW_PER_PHYS_BASE+0x5000)
@ -181,5 +180,4 @@
* Virtual accessor macros for static devices
*/
#endif

View File

@ -411,8 +411,7 @@ static void __init u300_timer_init(void)
/* Use general purpose timer 2 as clock source */
if (clocksource_mmio_init(U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2CC,
"GPT2", rate, 300, 32, clocksource_mmio_readl_up))
printk(KERN_ERR "timer: failed to initialize clock "
"source %s\n", clocksource_u300_1mhz.name);
pr_err("timer: failed to initialize U300 clock source\n");
clockevents_calc_mult_shift(&clockevent_u300_1mhz,
rate, APPTIMER_MIN_RANGE);

View File

@ -46,12 +46,6 @@ static struct map_desc v2m_io_desc[] __initdata = {
},
};
static void __init v2m_init_early(void)
{
ct_desc->init_early();
versatile_sched_clock_init(MMIO_P2V(V2M_SYS_24MHZ), 24000000);
}
static void __init v2m_timer_init(void)
{
u32 scctrl;
@ -365,6 +359,13 @@ static struct clk_lookup v2m_lookups[] = {
},
};
static void __init v2m_init_early(void)
{
ct_desc->init_early();
clkdev_add_table(v2m_lookups, ARRAY_SIZE(v2m_lookups));
versatile_sched_clock_init(MMIO_P2V(V2M_SYS_24MHZ), 24000000);
}
static void v2m_power_off(void)
{
if (v2m_cfg_write(SYS_CFG_SHUTDOWN | SYS_CFG_SITE_MB, 0))
@ -418,8 +419,6 @@ static void __init v2m_init(void)
{
int i;
clkdev_add_table(v2m_lookups, ARRAY_SIZE(v2m_lookups));
platform_device_register(&v2m_pcie_i2c_device);
platform_device_register(&v2m_ddc_i2c_device);
platform_device_register(&v2m_flash_device);

View File

@ -24,7 +24,9 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
/*
* We fork()ed a process, and we need a new context for the child
* to run in.
* to run in. We reserve version 0 for initial tasks so we will
* always allocate an ASID. The ASID 0 is reserved for the TTBR
* register changing sequence.
*/
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
@ -34,11 +36,8 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
static void flush_context(void)
{
u32 ttb;
/* Copy TTBR1 into TTBR0 */
asm volatile("mrc p15, 0, %0, c2, c0, 1\n"
"mcr p15, 0, %0, c2, c0, 0"
: "=r" (ttb));
/* set the reserved ASID before flushing the TLB */
asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0));
isb();
local_flush_tlb_all();
if (icache_is_vivt_asid_tagged()) {
@ -94,7 +93,7 @@ static void reset_context(void *info)
return;
smp_rmb();
asid = cpu_last_asid + cpu;
asid = cpu_last_asid + cpu + 1;
flush_context();
set_mm_context(mm, asid);
@ -144,13 +143,13 @@ void __new_context(struct mm_struct *mm)
* to start a new version and flush the TLB.
*/
if (unlikely((asid & ~ASID_MASK) == 0)) {
asid = cpu_last_asid + smp_processor_id();
asid = cpu_last_asid + smp_processor_id() + 1;
flush_context();
#ifdef CONFIG_SMP
smp_wmb();
smp_call_function(reset_context, NULL, 1);
#endif
cpu_last_asid += NR_CPUS - 1;
cpu_last_asid += NR_CPUS;
}
set_mm_context(mm, asid);

View File

@ -330,6 +330,12 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
if (phys_initrd_size &&
!memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n",
phys_initrd_start, phys_initrd_size);
phys_initrd_start = phys_initrd_size = 0;
}
if (phys_initrd_size &&
memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
@ -635,7 +641,8 @@ void __init mem_init(void)
" modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
" .init : 0x%p" " - 0x%p" " (%4d kB)\n"
" .text : 0x%p" " - 0x%p" " (%4d kB)\n"
" .data : 0x%p" " - 0x%p" " (%4d kB)\n",
" .data : 0x%p" " - 0x%p" " (%4d kB)\n"
" .bss : 0x%p" " - 0x%p" " (%4d kB)\n",
MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
(PAGE_SIZE)),
@ -657,7 +664,8 @@ void __init mem_init(void)
MLK_ROUNDUP(__init_begin, __init_end),
MLK_ROUNDUP(_text, _etext),
MLK_ROUNDUP(_sdata, _edata));
MLK_ROUNDUP(_sdata, _edata),
MLK_ROUNDUP(__bss_start, __bss_stop));
#undef MLK
#undef MLM

View File

@ -146,7 +146,7 @@ __arm7tdmi_proc_info:
.long 0
.long 0
.long v4_cache_fns
.size __arm7tdmi_proc_info, . - __arm7dmi_proc_info
.size __arm7tdmi_proc_info, . - __arm7tdmi_proc_info
.type __triscenda7_proc_info, #object
__triscenda7_proc_info:

View File

@ -116,7 +116,7 @@ __arm9tdmi_proc_info:
.long 0
.long 0
.long v4_cache_fns
.size __arm9tdmi_proc_info, . - __arm9dmi_proc_info
.size __arm9tdmi_proc_info, . - __arm9tdmi_proc_info
.type __p2001_proc_info, #object
__p2001_proc_info:

View File

@ -108,16 +108,18 @@ ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_ARM_ERRATA_430973
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
#endif
mrc p15, 0, r2, c2, c0, 1 @ load TTB 1
mcr p15, 0, r2, c2, c0, 0 @ into TTB 0
#ifdef CONFIG_ARM_ERRATA_754322
dsb
#endif
mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID
isb
1: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
isb
#ifdef CONFIG_ARM_ERRATA_754322
dsb
#endif
mcr p15, 0, r1, c13, c0, 1 @ set context ID
isb
mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
isb
#endif
mov pc, lr
ENDPROC(cpu_v7_switch_mm)

View File

@ -139,7 +139,7 @@ static struct sdma_script_start_addrs addr_imx35_to2 = {
#endif
#ifdef CONFIG_SOC_IMX51
static struct sdma_script_start_addrs addr_imx51_to1 = {
static struct sdma_script_start_addrs addr_imx51 = {
.ap_2_ap_addr = 642,
.uart_2_mcu_addr = 817,
.mcu_2_app_addr = 747,
@ -196,7 +196,9 @@ static int __init imxXX_add_imx_dma(void)
#if defined(CONFIG_SOC_IMX51)
if (cpu_is_mx51()) {
imx51_imx_sdma_data.pdata.script_addrs = &addr_imx51_to1;
int to_version = mx51_revision() >> 4;
imx51_imx_sdma_data.pdata.to_version = to_version;
imx51_imx_sdma_data.pdata.script_addrs = &addr_imx51;
ret = imx_add_imx_sdma(&imx51_imx_sdma_data);
} else
#endif

View File

@ -84,6 +84,7 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/pm_runtime.h>
#include <plat/omap_device.h>
#include <plat/omap_hwmod.h>
@ -539,20 +540,34 @@ int omap_early_device_register(struct omap_device *od)
static int _od_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
int ret;
return omap_device_idle(pdev);
ret = pm_generic_runtime_suspend(dev);
if (!ret)
omap_device_idle(pdev);
return ret;
}
static int _od_runtime_idle(struct device *dev)
{
return pm_generic_runtime_idle(dev);
}
static int _od_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
return omap_device_enable(pdev);
omap_device_enable(pdev);
return pm_generic_runtime_resume(dev);
}
static struct dev_power_domain omap_device_power_domain = {
.ops = {
.runtime_suspend = _od_runtime_suspend,
.runtime_idle = _od_runtime_idle,
.runtime_resume = _od_runtime_resume,
USE_PLATFORM_PM_SLEEP_OPS
}

View File

@ -110,7 +110,7 @@ CONFIG_LEDS_GPIO=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_RTC_CLASS=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_S35390A=m
CONFIG_RTC_DRV_AT32AP700X=m
CONFIG_DMADEVICES=y

View File

@ -112,7 +112,7 @@ CONFIG_USB_G_SERIAL=m
CONFIG_USB_G_PRINTER=m
CONFIG_MMC=m
CONFIG_SDH_BFIN=m
CONFIG_RTC_CLASS=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_BFIN=m
CONFIG_EXT2_FS=m
# CONFIG_DNOTIFY is not set

View File

@ -678,7 +678,7 @@ CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
CONFIG_RTC_CLASS=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_INTF_DEV_UIE_EMUL=y
CONFIG_RTC_DRV_TEST=m
CONFIG_RTC_DRV_DS1307=m

View File

@ -15,6 +15,7 @@
* User space memory access functions
*/
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <asm/page.h>
#include <asm/errno.h>

View File

@ -85,7 +85,7 @@ CONFIG_USB_OHCI_HCD=m
CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
# CONFIG_USB_OHCI_HCD_PCI is not set
CONFIG_USB_STORAGE=m
CONFIG_RTC_CLASS=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_PCF8563=m
CONFIG_EXT2_FS=m
CONFIG_EXT3_FS=m

View File

@ -141,7 +141,7 @@ CONFIG_USB_EHCI_TT_NEWSCHED=y
# CONFIG_USB_EHCI_HCD_PPC_OF is not set
CONFIG_USB_OHCI_HCD=m
CONFIG_USB_STORAGE=m
CONFIG_RTC_CLASS=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_PS3=m
CONFIG_EXT2_FS=m
CONFIG_EXT3_FS=m

View File

@ -20,6 +20,7 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/usb/r8a66597.h>
#include <linux/usb/renesas_usbhs.h>
#include <linux/i2c.h>
#include <linux/i2c/tsc2007.h>
#include <linux/spi/spi.h>
@ -232,6 +233,52 @@ static struct platform_device usb1_common_device = {
.resource = usb1_common_resources,
};
/*
* USBHS
*/
static int usbhs_get_id(struct platform_device *pdev)
{
return gpio_get_value(GPIO_PTB3);
}
static struct renesas_usbhs_platform_info usbhs_info = {
.platform_callback = {
.get_id = usbhs_get_id,
},
.driver_param = {
.buswait_bwait = 4,
.detection_delay = 5,
},
};
static struct resource usbhs_resources[] = {
[0] = {
.start = 0xa4d90000,
.end = 0xa4d90124 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 66,
.end = 66,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device usbhs_device = {
.name = "renesas_usbhs",
.id = 1,
.dev = {
.dma_mask = NULL, /* do not use DMA */
.coherent_dma_mask = 0xffffffff,
.platform_data = &usbhs_info,
},
.num_resources = ARRAY_SIZE(usbhs_resources),
.resource = usbhs_resources,
.archdata = {
.hwblk_id = HWBLK_USB1,
},
};
/* LCDC */
const static struct fb_videomode ecovec_lcd_modes[] = {
{
@ -897,6 +944,7 @@ static struct platform_device *ecovec_devices[] __initdata = {
&sh_eth_device,
&usb0_host_device,
&usb1_common_device,
&usbhs_device,
&lcdc_device,
&ceu0_device,
&ceu1_device,

View File

@ -27,8 +27,6 @@ IMAGE_OFFSET := $(shell /bin/bash -c 'printf "0x%08x" \
$(CONFIG_BOOT_LINK_OFFSET)]')
endif
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
ifeq ($(CONFIG_MCOUNT),y)
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
@ -37,7 +35,25 @@ endif
LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(IMAGE_OFFSET) -e startup \
-T $(obj)/../../kernel/vmlinux.lds
$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(LIBGCC) FORCE
#
# Pull in the necessary libgcc bits from the in-kernel implementation.
#
lib1funcs-$(CONFIG_SUPERH32) := ashiftrt.S ashldi3.c ashrsi3.S ashlsi3.S \
lshrsi3.S
lib1funcs-obj := \
$(addsuffix .o, $(basename $(addprefix $(obj)/, $(lib1funcs-y))))
lib1funcs-dir := $(srctree)/arch/$(SRCARCH)/lib
ifeq ($(BITS),64)
lib1funcs-dir := $(addsuffix $(BITS), $(lib1funcs-dir))
endif
KBUILD_CFLAGS += -I$(lib1funcs-dir)
$(addprefix $(obj)/,$(lib1funcs-y)): $(obj)/%: $(lib1funcs-dir)/% FORCE
$(call cmd,shipped)
$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(lib1funcs-obj) FORCE
$(call if_changed,ld)
@:

View File

@ -227,7 +227,7 @@ CONFIG_USB_SERIAL=m
CONFIG_USB_SERIAL_GENERIC=y
CONFIG_USB_SERIAL_ARK3116=m
CONFIG_USB_SERIAL_PL2303=m
CONFIG_RTC_CLASS=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_SH=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y

View File

@ -15,8 +15,9 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
" mov.l %2, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (retval),
"+r" (m)
: "r" (val)
"+r" (m),
"+r" (val) /* inhibit r15 overloading */
:
: "memory", "r0", "r1");
return retval;
@ -36,8 +37,9 @@ static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
" mov.b %2, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (retval),
"+r" (m)
: "r" (val)
"+r" (m),
"+r" (val) /* inhibit r15 overloading */
:
: "memory" , "r0", "r1");
return retval;
@ -54,13 +56,14 @@ static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
" nop \n\t"
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-8, r15 \n\t" /* LOGIN */
" mov.l @%1, %0 \n\t" /* load old value */
" cmp/eq %0, %2 \n\t"
" mov.l @%3, %0 \n\t" /* load old value */
" cmp/eq %0, %1 \n\t"
" bf 1f \n\t" /* if not equal */
" mov.l %3, @%1 \n\t" /* store new value */
" mov.l %2, @%3 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (retval)
: "r" (m), "r" (old), "r" (new)
: "=&r" (retval),
"+r" (old), "+r" (new) /* old or new can be r15 */
: "r" (m)
: "memory" , "r0", "r1", "t");
return retval;

View File

@ -150,7 +150,6 @@ struct thread_struct {
#define SR_USER (SR_MMU | SR_FD)
#define start_thread(_regs, new_pc, new_sp) \
set_fs(USER_DS); \
_regs->sr = SR_USER; /* User mode. */ \
_regs->pc = new_pc - 4; /* Compensate syscall exit */ \
_regs->pc |= 1; /* Set SHmedia ! */ \

View File

@ -298,6 +298,14 @@ enum {
SHDMA_SLAVE_SCIF4_RX,
SHDMA_SLAVE_SCIF5_TX,
SHDMA_SLAVE_SCIF5_RX,
SHDMA_SLAVE_USB0D0_TX,
SHDMA_SLAVE_USB0D0_RX,
SHDMA_SLAVE_USB0D1_TX,
SHDMA_SLAVE_USB0D1_RX,
SHDMA_SLAVE_USB1D0_TX,
SHDMA_SLAVE_USB1D0_RX,
SHDMA_SLAVE_USB1D1_TX,
SHDMA_SLAVE_USB1D1_RX,
SHDMA_SLAVE_SDHI0_TX,
SHDMA_SLAVE_SDHI0_RX,
SHDMA_SLAVE_SDHI1_TX,

View File

@ -92,6 +92,46 @@ static const struct sh_dmae_slave_config sh7724_dmae_slaves[] = {
.addr = 0xa4e50024,
.chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x36,
}, {
.slave_id = SHDMA_SLAVE_USB0D0_TX,
.addr = 0xA4D80100,
.chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0x73,
}, {
.slave_id = SHDMA_SLAVE_USB0D0_RX,
.addr = 0xA4D80100,
.chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0x73,
}, {
.slave_id = SHDMA_SLAVE_USB0D1_TX,
.addr = 0xA4D80120,
.chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0x77,
}, {
.slave_id = SHDMA_SLAVE_USB0D1_RX,
.addr = 0xA4D80120,
.chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0x77,
}, {
.slave_id = SHDMA_SLAVE_USB1D0_TX,
.addr = 0xA4D90100,
.chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xab,
}, {
.slave_id = SHDMA_SLAVE_USB1D0_RX,
.addr = 0xA4D90100,
.chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xab,
}, {
.slave_id = SHDMA_SLAVE_USB1D1_TX,
.addr = 0xA4D90120,
.chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xaf,
}, {
.slave_id = SHDMA_SLAVE_USB1D1_RX,
.addr = 0xA4D90120,
.chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
.mid_rid = 0xaf,
}, {
.slave_id = SHDMA_SLAVE_SDHI0_TX,
.addr = 0x04ce0030,

View File

@ -102,8 +102,6 @@ EXPORT_SYMBOL(kernel_thread);
void start_thread(struct pt_regs *regs, unsigned long new_pc,
unsigned long new_sp)
{
set_fs(USER_DS);
regs->pr = 0;
regs->sr = SR_FD;
regs->pc = new_pc;

View File

@ -26,9 +26,9 @@ static int cache_seq_show(struct seq_file *file, void *iter)
{
unsigned int cache_type = (unsigned int)file->private;
struct cache_info *cache;
unsigned int waysize, way, cache_size;
unsigned long ccr, base;
static unsigned long addrstart = 0;
unsigned int waysize, way;
unsigned long ccr;
unsigned long addrstart = 0;
/*
* Go uncached immediately so we don't skew the results any
@ -45,28 +45,13 @@ static int cache_seq_show(struct seq_file *file, void *iter)
}
if (cache_type == CACHE_TYPE_DCACHE) {
base = CACHE_OC_ADDRESS_ARRAY;
addrstart = CACHE_OC_ADDRESS_ARRAY;
cache = &current_cpu_data.dcache;
} else {
base = CACHE_IC_ADDRESS_ARRAY;
addrstart = CACHE_IC_ADDRESS_ARRAY;
cache = &current_cpu_data.icache;
}
/*
* Due to the amount of data written out (depending on the cache size),
* we may be iterated over multiple times. In this case, keep track of
* the entry position in addrstart, and rewind it when we've hit the
* end of the cache.
*
* Likewise, the same code is used for multiple caches, so care must
* be taken for bouncing addrstart back and forth so the appropriate
* cache is hit.
*/
cache_size = cache->ways * cache->sets * cache->linesz;
if (((addrstart & 0xff000000) != base) ||
(addrstart & 0x00ffffff) > cache_size)
addrstart = base;
waysize = cache->sets;
/*

View File

@ -4,7 +4,6 @@
#define ARCH_DISCARD_MEMBLOCK
u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align);
void memblock_x86_to_bootmem(u64 start, u64 end);
void memblock_x86_reserve_range(u64 start, u64 end, char *name);
void memblock_x86_free_range(u64 start, u64 end);
@ -19,5 +18,6 @@ u64 memblock_x86_hole_size(u64 start, u64 end);
u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align);
u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
u64 memblock_x86_memory_in_range(u64 addr, u64 limit);
bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align);
#endif

View File

@ -22,6 +22,8 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
u64 product;
#ifdef __i386__
u32 tmp1, tmp2;
#else
ulong tmp;
#endif
if (shift < 0)
@ -42,8 +44,11 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
#elif defined(__x86_64__)
__asm__ (
"mul %%rdx ; shrd $32,%%rdx,%%rax"
: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
"mul %[mul_frac] ; shrd $32, %[hi], %[lo]"
: [lo]"=a"(product),
[hi]"=d"(tmp)
: "0"(delta),
[mul_frac]"rm"((u64)mul_frac));
#else
#error implement me!
#endif
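A portable sketch of what the new x86-64 asm computes, i.e. ((delta << shift) * mul_frac) >> 32 using a wide intermediate, is shown below; it assumes a compiler that provides unsigned __int128 and is for illustration only:

#include <stdint.h>

static inline uint64_t pvclock_scale_delta_sketch(uint64_t delta,
						  uint32_t mul_frac, int shift)
{
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

	/* 64x32-bit multiply, keep bits [95:32] of the product */
	return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
}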

View File

@ -565,7 +565,7 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
return gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
}
static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)

View File

@ -121,7 +121,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
gva_t addr, u32 access)
{
pt_element_t pte;
pt_element_t __user *ptep_user;
pt_element_t __user *uninitialized_var(ptep_user);
gfn_t table_gfn;
unsigned index, pt_access, uninitialized_var(pte_access);
gpa_t pte_gpa;

View File

@ -2047,7 +2047,8 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
unsigned long cr0,
struct kvm_vcpu *vcpu)
{
vmx_decache_cr3(vcpu);
if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
vmx_decache_cr3(vcpu);
if (!(cr0 & X86_CR0_PG)) {
/* From paging/starting to nonpaging */
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,

View File

@ -8,7 +8,7 @@
#include <linux/range.h>
/* Check for already reserved areas */
static bool __init check_with_memblock_reserved_size(u64 *addrp, u64 *sizep, u64 align)
bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
{
struct memblock_region *r;
u64 addr = *addrp, last;
@ -59,7 +59,7 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
if (addr >= ei_last)
continue;
*sizep = ei_last - addr;
while (check_with_memblock_reserved_size(&addr, sizep, align))
while (memblock_x86_check_reserved_size(&addr, sizep, align))
;
if (*sizep)

View File

@ -310,14 +310,31 @@ void __init efi_reserve_boot_services(void)
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
efi_memory_desc_t *md = p;
unsigned long long start = md->phys_addr;
unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
u64 start = md->phys_addr;
u64 size = md->num_pages << EFI_PAGE_SHIFT;
if (md->type != EFI_BOOT_SERVICES_CODE &&
md->type != EFI_BOOT_SERVICES_DATA)
continue;
memblock_x86_reserve_range(start, start + size, "EFI Boot");
/* Only reserve where possible:
* - Not within any already allocated areas
* - Not over any memory area (really needed, if above?)
* - Not within any part of the kernel
* - Not the bios reserved area
*/
if ((start+size >= virt_to_phys(_text)
&& start <= virt_to_phys(_end)) ||
!e820_all_mapped(start, start+size, E820_RAM) ||
memblock_x86_check_reserved_size(&start, &size,
1<<EFI_PAGE_SHIFT)) {
/* Could not reserve, skip it */
md->num_pages = 0;
memblock_dbg(PFX "Could not reserve boot range "
"[0x%010llx-0x%010llx]\n",
start, start+size-1);
} else
memblock_x86_reserve_range(start, start+size,
"EFI Boot");
}
}
@ -334,6 +351,10 @@ static void __init efi_free_boot_services(void)
md->type != EFI_BOOT_SERVICES_DATA)
continue;
/* Could not reserve boot area */
if (!size)
continue;
free_bootmem_late(start, size);
}
}

View File

@ -1033,6 +1033,13 @@ static void xen_machine_halt(void)
xen_reboot(SHUTDOWN_poweroff);
}
static void xen_machine_power_off(void)
{
if (pm_power_off)
pm_power_off();
xen_reboot(SHUTDOWN_poweroff);
}
static void xen_crash_shutdown(struct pt_regs *regs)
{
xen_reboot(SHUTDOWN_crash);
@ -1058,7 +1065,7 @@ int xen_panic_handler_init(void)
static const struct machine_ops xen_machine_ops __initconst = {
.restart = xen_restart,
.halt = xen_machine_halt,
.power_off = xen_machine_halt,
.power_off = xen_machine_power_off,
.shutdown = xen_machine_halt,
.crash_shutdown = xen_crash_shutdown,
.emergency_restart = xen_emergency_restart,

View File

@ -59,6 +59,7 @@
#include <asm/page.h>
#include <asm/init.h>
#include <asm/pat.h>
#include <asm/smp.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
@ -1231,7 +1232,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
{
struct {
struct mmuext_op op;
DECLARE_BITMAP(mask, NR_CPUS);
DECLARE_BITMAP(mask, num_processors);
} *args;
struct multicall_space mcs;
@ -1599,6 +1600,11 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
pte_t pte;
#ifdef CONFIG_X86_32
if (pfn > max_pfn_mapped)
max_pfn_mapped = pfn;
#endif
if (!pte_none(pte_page[pteidx]))
continue;
@ -1766,7 +1772,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
initial_kernel_pmd =
extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
xen_start_info->nr_pt_frames * PAGE_SIZE +
512*1024);
kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);

View File

@ -227,11 +227,7 @@ char * __init xen_memory_setup(void)
memcpy(map_raw, map, sizeof(map));
e820.nr_map = 0;
#ifdef CONFIG_X86_32
xen_extra_mem_start = mem_end;
#else
xen_extra_mem_start = max((1ULL << 32), mem_end);
#endif
for (i = 0; i < memmap.nr_entries; i++) {
unsigned long long end;
@ -266,6 +262,12 @@ char * __init xen_memory_setup(void)
if (map[i].size > 0)
e820_add_region(map[i].addr, map[i].size, map[i].type);
}
/* Align the balloon area so that max_low_pfn does not get set
* to be at the _end_ of the PCI gap at the far end (fee01000).
* Note that xen_extra_mem_start gets set in the loop above to be
* past the last E820 region. */
if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32)))
xen_extra_mem_start = (1ULL<<32);
/*
* In domU, the ISA region is normal, usable memory, but we

View File

@ -205,11 +205,18 @@ static void __init xen_smp_prepare_boot_cpu(void)
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
unsigned cpu;
unsigned int i;
xen_init_lock_cpu(0);
smp_store_cpu_info(0);
cpu_data(0).x86_max_cores = 1;
for_each_possible_cpu(i) {
zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
}
set_cpu_sibling_map(0);
if (xen_smp_intr_init(0))

View File

@ -387,7 +387,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
clknb = container_of(nb, struct pm_clk_notifier_block, nb);
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
case BUS_NOTIFY_BIND_DRIVER:
if (clknb->con_ids[0]) {
for (con_id = clknb->con_ids; *con_id; con_id++)
enable_clock(dev, *con_id);
@ -395,7 +395,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
enable_clock(dev, NULL);
}
break;
case BUS_NOTIFY_DEL_DEVICE:
case BUS_NOTIFY_UNBOUND_DRIVER:
if (clknb->con_ids[0]) {
for (con_id = clknb->con_ids; *con_id; con_id++)
disable_clock(dev, *con_id);

View File

@ -57,7 +57,8 @@ static int async_error;
*/
void device_pm_init(struct device *dev)
{
dev->power.in_suspend = false;
dev->power.is_prepared = false;
dev->power.is_suspended = false;
init_completion(&dev->power.completion);
complete_all(&dev->power.completion);
dev->power.wakeup = NULL;
@ -91,7 +92,7 @@ void device_pm_add(struct device *dev)
pr_debug("PM: Adding info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
mutex_lock(&dpm_list_mtx);
if (dev->parent && dev->parent->power.in_suspend)
if (dev->parent && dev->parent->power.is_prepared)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
@ -511,7 +512,14 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
dpm_wait(dev->parent, async);
device_lock(dev);
dev->power.in_suspend = false;
/*
* This is a fib. But we'll allow new children to be added below
* a resumed device, even if the device hasn't been completed yet.
*/
dev->power.is_prepared = false;
if (!dev->power.is_suspended)
goto Unlock;
if (dev->pwr_domain) {
pm_dev_dbg(dev, state, "power domain ");
@ -548,6 +556,9 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
}
End:
dev->power.is_suspended = false;
Unlock:
device_unlock(dev);
complete_all(&dev->power.completion);
@ -670,7 +681,7 @@ void dpm_complete(pm_message_t state)
struct device *dev = to_device(dpm_prepared_list.prev);
get_device(dev);
dev->power.in_suspend = false;
dev->power.is_prepared = false;
list_move(&dev->power.entry, &list);
mutex_unlock(&dpm_list_mtx);
@ -835,11 +846,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
device_lock(dev);
if (async_error)
goto End;
goto Unlock;
if (pm_wakeup_pending()) {
async_error = -EBUSY;
goto End;
goto Unlock;
}
if (dev->pwr_domain) {
@ -877,6 +888,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
End:
dev->power.is_suspended = !error;
Unlock:
device_unlock(dev);
complete_all(&dev->power.completion);
@ -1042,7 +1056,7 @@ int dpm_prepare(pm_message_t state)
put_device(dev);
break;
}
dev->power.in_suspend = true;
dev->power.is_prepared = true;
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &dpm_prepared_list);
put_device(dev);
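The direction of the change in this file, splitting the old in_suspend flag into is_prepared and is_suspended, can be summarized with a minimal sketch; the field names mirror the patch but the snippet itself is illustrative, not kernel code:

#include <stdbool.h>

struct power_state_sketch {
	bool is_prepared;	/* dpm_prepare() has run for this device */
	bool is_suspended;	/* the suspend callbacks actually completed */
};

static void device_resume_sketch(struct power_state_sketch *p)
{
	p->is_prepared = false;		/* allow new children below this device */
	if (!p->is_suspended)
		return;			/* suspend was aborted; nothing to undo */
	/* ... invoke the resume callbacks here ... */
	p->is_suspended = false;
}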

View File

@ -64,6 +64,8 @@ static ssize_t btmrvl_hscfgcmd_write(struct file *file,
return -EFAULT;
ret = strict_strtol(buf, 10, &result);
if (ret)
return ret;
priv->btmrvl_dev.hscfgcmd = result;
@ -108,6 +110,8 @@ static ssize_t btmrvl_psmode_write(struct file *file, const char __user *ubuf,
return -EFAULT;
ret = strict_strtol(buf, 10, &result);
if (ret)
return ret;
priv->btmrvl_dev.psmode = result;
@ -147,6 +151,8 @@ static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf,
return -EFAULT;
ret = strict_strtol(buf, 10, &result);
if (ret)
return ret;
priv->btmrvl_dev.pscmd = result;
@ -191,6 +197,8 @@ static ssize_t btmrvl_gpiogap_write(struct file *file, const char __user *ubuf,
return -EFAULT;
ret = strict_strtol(buf, 16, &result);
if (ret)
return ret;
priv->btmrvl_dev.gpio_gap = result;
@ -230,6 +238,8 @@ static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf,
return -EFAULT;
ret = strict_strtol(buf, 10, &result);
if (ret)
return ret;
priv->btmrvl_dev.hscmd = result;
if (priv->btmrvl_dev.hscmd) {
@ -272,6 +282,8 @@ static ssize_t btmrvl_hsmode_write(struct file *file, const char __user *ubuf,
return -EFAULT;
ret = strict_strtol(buf, 10, &result);
if (ret)
return ret;
priv->btmrvl_dev.hsmode = result;

View File

@ -163,11 +163,32 @@ static irqreturn_t hpet_interrupt(int irq, void *data)
* This has the effect of treating non-periodic like periodic.
*/
if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
unsigned long m, t;
unsigned long m, t, mc, base, k;
struct hpet __iomem *hpet = devp->hd_hpet;
struct hpets *hpetp = devp->hd_hpets;
t = devp->hd_ireqfreq;
m = read_counter(&devp->hd_timer->hpet_compare);
write_counter(t + m, &devp->hd_timer->hpet_compare);
mc = read_counter(&hpet->hpet_mc);
/* The time for the next interrupt would logically be t + m,
* however, if we are very unlucky and the interrupt is delayed
* for longer than t then we will completely miss the next
* interrupt if we set t + m and an application will hang.
* Therefore we need to make a more complex computation assuming
* that there exists a k for which the following is true:
* k * t + base < mc + delta
* (k + 1) * t + base > mc + delta
* where t is the interval in hpet ticks for the given freq,
* base is the theoretical start value 0 < base < t,
* mc is the main counter value at the time of the interrupt,
* delta is the time it takes to write a value to the
* comparator.
* k may then be computed as (mc - base + delta) / t .
*/
base = mc % t;
k = (mc - base + hpetp->hp_delta) / t;
write_counter(t * (k + 1) + base,
&devp->hd_timer->hpet_compare);
}
if (devp->hd_flags & HPET_SHARED_IRQ)
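A worked example makes the new comparator update easier to follow; all numbers below (t, m, mc, delta) are made up for illustration and do not come from the patch:

#include <stdio.h>

int main(void)
{
	unsigned long t     = 1000;	/* ticks per period */
	unsigned long m     = 2250;	/* previously programmed compare value */
	unsigned long mc    = 5730;	/* main counter read in a very late interrupt */
	unsigned long delta = 20;	/* ticks needed to update the comparator */

	unsigned long base = mc % t;			/* 730 */
	unsigned long k    = (mc - base + delta) / t;	/* 5 */

	/* The old scheme lands in the past and the next interrupt is lost;
	 * the new scheme always lands beyond mc + delta. */
	printf("old compare: %lu (already behind mc = %lu)\n", t + m, mc);
	printf("new compare: %lu\n", t * (k + 1) + base);
	return 0;
}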

View File

@ -298,11 +298,13 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
old_index = stat->last_index;
new_index = freq_table_get_index(stat, freq->new);
cpufreq_stats_update(freq->cpu);
if (old_index == new_index)
/* We can't do stat->time_in_state[-1]= .. */
if (old_index == -1 || new_index == -1)
return 0;
if (old_index == -1 || new_index == -1)
cpufreq_stats_update(freq->cpu);
if (old_index == new_index)
return 0;
spin_lock(&cpufreq_stats_lock);

View File

@ -1079,6 +1079,9 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
}
res = transition_fid_vid(data, fid, vid);
if (res)
return res;
freqs.new = find_khz_freq_from_fid(data->currfid);
for_each_cpu(i, data->available_cores) {
@ -1101,7 +1104,8 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
/* get MSR index for hardware pstate transition */
pstate = index & HW_PSTATE_MASK;
if (pstate > data->max_hw_pstate)
return 0;
return -EINVAL;
freqs.old = find_khz_freq_from_pstate(data->powernow_table,
data->currpstate);
freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);

View File

@ -1221,6 +1221,11 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
} else {
do {
for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
irq_cap = 1;
break;
}
if ((errirq_res->flags & IORESOURCE_BITS) ==
IORESOURCE_IRQ_SHAREABLE)
chan_flag[irq_cnt] = IRQF_SHARED;
@ -1230,15 +1235,11 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
"Found IRQ %d for channel %d\n",
i, irq_cnt);
chan_irq[irq_cnt++] = i;
if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
break;
}
if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
irq_cap = 1;
if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
break;
}
chanirq_res = platform_get_resource(pdev,
IORESOURCE_IRQ, ++irqres);
} while (irq_cnt < pdata->channel_num && chanirq_res);

View File

@ -469,8 +469,9 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
+ OMAP24XX_GPIO_CLEARWKUENA);
}
}
/* This part needs to be executed always for OMAP34xx */
if (cpu_is_omap34xx() || (bank->non_wakeup_gpios & gpio_bit)) {
/* This part needs to be executed always for OMAP{34xx, 44xx} */
if (cpu_is_omap34xx() || cpu_is_omap44xx() ||
(bank->non_wakeup_gpios & gpio_bit)) {
/*
* Log the edge gpio and manually trigger the IRQ
* after resume if the input level changes

View File

@ -184,9 +184,9 @@ drm_edid_block_valid(u8 *raw_edid)
bad:
if (raw_edid) {
DRM_ERROR("Raw EDID:\n");
printk(KERN_ERR "Raw EDID:\n");
print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
printk("\n");
printk(KERN_ERR "\n");
}
return 0;
}
@ -258,6 +258,17 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
return ret == 2 ? 0 : -1;
}
static bool drm_edid_is_zero(u8 *in_edid, int length)
{
int i;
u32 *raw_edid = (u32 *)in_edid;
for (i = 0; i < length / 4; i++)
if (*(raw_edid + i) != 0)
return false;
return true;
}
static u8 *
drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
@ -273,6 +284,10 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
goto out;
if (drm_edid_block_valid(block))
break;
if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
connector->null_edid_counter++;
goto carp;
}
}
if (i == 4)
goto carp;

View File

@ -28,6 +28,7 @@
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
#include <linux/ratelimit.h>
#include "drmP.h"
#include "drm_core.h"
@ -253,10 +254,10 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
return -EFAULT;
m32.handle = (unsigned long)handle;
if (m32.handle != (unsigned long)handle && printk_ratelimit())
printk(KERN_ERR "compat_drm_addmap truncated handle"
" %p for type %d offset %x\n",
handle, m32.type, m32.offset);
if (m32.handle != (unsigned long)handle)
printk_ratelimited(KERN_ERR "compat_drm_addmap truncated handle"
" %p for type %d offset %x\n",
handle, m32.type, m32.offset);
if (copy_to_user(argp, &m32, sizeof(m32)))
return -EFAULT;

View File

@ -251,7 +251,7 @@ err:
}
int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
(p->busnum & 0xff) != dev->pdev->bus->number ||
@ -292,6 +292,7 @@ static struct drm_bus drm_pci_bus = {
.get_name = drm_pci_get_name,
.set_busid = drm_pci_set_busid,
.set_unique = drm_pci_set_unique,
.irq_by_busid = drm_pci_irq_by_busid,
.agp_init = drm_pci_agp_init,
};

View File

@ -1740,6 +1740,16 @@ void ironlake_irq_preinstall(struct drm_device *dev)
INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
I915_WRITE(HWSTAM, 0xeffe);
if (IS_GEN6(dev)) {
/* Workaround stalls observed on Sandy Bridge GPUs by
* making the blitter command streamer generate a
* write to the Hardware Status Page for
* MI_USER_INTERRUPT. This appears to serialize the
* previous seqno write out before the interrupt
* happens.
*/
I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
}
/* XXX hotplug from PCH */

View File

@ -401,8 +401,7 @@ int intel_setup_gmbus(struct drm_device *dev)
bus->reg0 = i | GMBUS_RATE_100KHZ;
/* XXX force bit banging until GMBUS is fully debugged */
if (IS_GEN2(dev))
bus->force_bit = intel_gpio_create(dev_priv, i);
bus->force_bit = intel_gpio_create(dev_priv, i);
}
intel_i2c_reset(dev_priv->dev);

View File

@ -262,7 +262,6 @@ static bool nouveau_dsm_detect(void)
vga_count++;
retval = nouveau_dsm_pci_probe(pdev);
printk("ret val is %d\n", retval);
if (retval & NOUVEAU_DSM_HAS_MUX)
has_dsm |= 1;
if (retval & NOUVEAU_DSM_HAS_OPT)

View File

@ -339,11 +339,12 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
int ret;
if (dev_priv->chipset < 0x84) {
ret = RING_SPACE(chan, 3);
ret = RING_SPACE(chan, 4);
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 2);
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3);
OUT_RING (chan, NvSema);
OUT_RING (chan, sema->mem->start);
OUT_RING (chan, 1);
} else
@ -351,10 +352,12 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
u64 offset = vma->offset + sema->mem->start;
ret = RING_SPACE(chan, 5);
ret = RING_SPACE(chan, 7);
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
OUT_RING (chan, chan->vram_handle);
BEGIN_RING(chan, NvSubSw, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
@ -394,11 +397,12 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
int ret;
if (dev_priv->chipset < 0x84) {
ret = RING_SPACE(chan, 4);
ret = RING_SPACE(chan, 5);
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2);
OUT_RING (chan, NvSema);
OUT_RING (chan, sema->mem->start);
BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
OUT_RING (chan, 1);
@ -407,10 +411,12 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
u64 offset = vma->offset + sema->mem->start;
ret = RING_SPACE(chan, 5);
ret = RING_SPACE(chan, 7);
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
OUT_RING (chan, chan->vram_handle);
BEGIN_RING(chan, NvSubSw, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
@ -504,22 +510,22 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
struct nouveau_gpuobj *obj = NULL;
int ret;
if (dev_priv->card_type >= NV_C0)
goto out_initialised;
if (dev_priv->card_type < NV_C0) {
/* Create an NV_SW object for various sync purposes */
ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
if (ret)
return ret;
/* Create an NV_SW object for various sync purposes */
ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
if (ret)
return ret;
ret = RING_SPACE(chan, 2);
if (ret)
return ret;
/* we leave subchannel empty for nvc0 */
ret = RING_SPACE(chan, 2);
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, 0, 1);
OUT_RING(chan, NvSw);
BEGIN_RING(chan, NvSubSw, 0, 1);
OUT_RING (chan, NvSw);
FIRE_RING (chan);
}
/* Create a DMA object for the shared cross-channel sync area. */
/* Setup area of memory shared between all channels for x-chan sync */
if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
@ -534,23 +540,8 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
nouveau_gpuobj_ref(NULL, &obj);
if (ret)
return ret;
ret = RING_SPACE(chan, 2);
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
OUT_RING(chan, NvSema);
} else {
ret = RING_SPACE(chan, 2);
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
OUT_RING (chan, chan->vram_handle); /* whole VM */
}
FIRE_RING(chan);
out_initialised:
INIT_LIST_HEAD(&chan->fence.pending);
spin_lock_init(&chan->fence.lock);
atomic_set(&chan->fence.last_sequence_irq, 0);

View File

@ -182,6 +182,11 @@ nouveau_perf_init(struct drm_device *dev)
entries = perf[2];
}
if (entries > NOUVEAU_PM_MAX_LEVEL) {
NV_DEBUG(dev, "perf table has too many entries - buggy vbios?\n");
entries = NOUVEAU_PM_MAX_LEVEL;
}
entry = perf + headerlen;
for (i = 0; i < entries; i++) {
struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];

Some files were not shown because too many files have changed in this diff.