Merge commit 'v2.6.37' into sched/core

Merge reason: Merge the final .37 tree.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Ingo Molnar 2011-01-05 14:14:42 +01:00
commit 27066fd484
424 changed files with 4337 additions and 3099 deletions


@ -516,6 +516,7 @@ int main(int argc, char *argv[])
default:
fprintf(stderr, "Unknown nla_type %d\n",
na->nla_type);
case TASKSTATS_TYPE_NULL:
break;
}
na = (struct nlattr *) (GENLMSG_DATA(&msg) + len);
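The hunk above adds TASKSTATS_TYPE_NULL *below* the default label, so NULL padding attributes jump straight past the warning while genuinely unknown types still print and then fall through to the break. A minimal sketch of the resulting control flow (the surrounding loop and the TASKSTATS_TYPE_AGGR_PID case are reconstructed context, an assumption):

	switch (na->nla_type) {
	case TASKSTATS_TYPE_AGGR_PID:
		/* ... handle the per-pid payload as before ... */
		break;
	default:
		fprintf(stderr, "Unknown nla_type %d\n", na->nla_type);
		/* fall through into the break below */
	case TASKSTATS_TYPE_NULL:
		break;	/* NULL attributes are harmless padding: no warning */
	}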


@ -18,7 +18,6 @@ prototypes:
char *(*d_dname)(struct dentry *dentry, char *buffer, int buflen);
locking rules:
none have BKL
dcache_lock rename_lock ->d_lock may block
d_revalidate: no no no yes
d_hash no no no yes
@ -42,18 +41,23 @@ ata *);
int (*rename) (struct inode *, struct dentry *,
struct inode *, struct dentry *);
int (*readlink) (struct dentry *, char __user *,int);
int (*follow_link) (struct dentry *, struct nameidata *);
void * (*follow_link) (struct dentry *, struct nameidata *);
void (*put_link) (struct dentry *, struct nameidata *, void *);
void (*truncate) (struct inode *);
int (*permission) (struct inode *, int, struct nameidata *);
int (*check_acl)(struct inode *, int);
int (*setattr) (struct dentry *, struct iattr *);
int (*getattr) (struct vfsmount *, struct dentry *, struct kstat *);
int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*removexattr) (struct dentry *, const char *);
void (*truncate_range)(struct inode *, loff_t, loff_t);
long (*fallocate)(struct inode *inode, int mode, loff_t offset, loff_t len);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
locking rules:
all may block, none have BKL
all may block
i_mutex(inode)
lookup: yes
create: yes
@ -66,19 +70,24 @@ rmdir: yes (both) (see below)
rename: yes (all) (see below)
readlink: no
follow_link: no
put_link: no
truncate: yes (see below)
setattr: yes
permission: no
check_acl: no
getattr: no
setxattr: yes
getxattr: no
listxattr: no
removexattr: yes
truncate_range: yes
fallocate: no
fiemap: no
Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
victim.
cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem.
->truncate() is never called directly - it's a callback, not a
method. It's called by vmtruncate() - library function normally used by
method. It's called by vmtruncate() - deprecated library function used by
->setattr(). Locking information above applies to that call (i.e. is
inherited from ->setattr() - vmtruncate() is used when ATTR_SIZE had been
passed).
@ -91,7 +100,7 @@ prototypes:
struct inode *(*alloc_inode)(struct super_block *sb);
void (*destroy_inode)(struct inode *);
void (*dirty_inode) (struct inode *);
int (*write_inode) (struct inode *, int);
int (*write_inode) (struct inode *, struct writeback_control *wbc);
int (*drop_inode) (struct inode *);
void (*evict_inode) (struct inode *);
void (*put_super) (struct super_block *);
@ -105,10 +114,10 @@ prototypes:
int (*show_options)(struct seq_file *, struct vfsmount *);
ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
locking rules:
All may block [not true, see below]
None have BKL
s_umount
alloc_inode:
destroy_inode:
@ -127,6 +136,7 @@ umount_begin: no
show_options: no (namespace_sem)
quota_read: no (see below)
quota_write: no (see below)
bdev_try_to_free_page: no (see below)
->statfs() has s_umount (shared) when called by ustat(2) (native or
compat), but that's an accident of bad API; s_umount is used to pin
@ -139,19 +149,25 @@ be the only ones operating on the quota file by the quota code (via
dqio_sem) (unless an admin really wants to screw up something and
writes to quota files with quotas on). For other details about locking
see also dquot_operations section.
->bdev_try_to_free_page is called from the ->releasepage handler of
the block device inode. See there for more details.
--------------------------- file_system_type ---------------------------
prototypes:
int (*get_sb) (struct file_system_type *, int,
const char *, void *, struct vfsmount *);
struct dentry *(*mount) (struct file_system_type *, int,
const char *, void *);
void (*kill_sb) (struct super_block *);
locking rules:
may block BKL
get_sb yes no
kill_sb yes no
may block
get_sb yes
mount yes
kill_sb yes
->get_sb() returns error or 0 with locked superblock attached to the vfsmount
(exclusive on ->s_umount).
->mount() returns ERR_PTR or the root dentry.
->kill_sb() takes a write-locked superblock, does all shutdown work on it,
unlocks and drops the reference.
@ -176,27 +192,35 @@ prototypes:
void (*freepage)(struct page *);
int (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
loff_t offset, unsigned long nr_segs);
int (*launder_page) (struct page *);
int (*get_xip_mem)(struct address_space *, pgoff_t, int, void **,
unsigned long *);
int (*migratepage)(struct address_space *, struct page *, struct page *);
int (*launder_page)(struct page *);
int (*is_partially_uptodate)(struct page *, read_descriptor_t *, unsigned long);
int (*error_remove_page)(struct address_space *, struct page *);
locking rules:
All except set_page_dirty and freepage may block
BKL PageLocked(page) i_mutex
writepage: no yes, unlocks (see below)
readpage: no yes, unlocks
sync_page: no maybe
writepages: no
set_page_dirty no no
readpages: no
write_begin: no locks the page yes
write_end: no yes, unlocks yes
perform_write: no n/a yes
bmap: no
invalidatepage: no yes
releasepage: no yes
freepage: no yes
direct_IO: no
launder_page: no yes
PageLocked(page) i_mutex
writepage: yes, unlocks (see below)
readpage: yes, unlocks
sync_page: maybe
writepages:
set_page_dirty no
readpages:
write_begin: locks the page yes
write_end: yes, unlocks yes
bmap:
invalidatepage: yes
releasepage: yes
freepage: yes
direct_IO:
get_xip_mem: maybe
migratepage: yes (both)
launder_page: yes
is_partially_uptodate: yes
error_remove_page: yes
->write_begin(), ->write_end(), ->sync_page() and ->readpage()
may be called from the request handler (/dev/loop).
@ -276,9 +300,8 @@ under spinlock (it cannot block) and is sometimes called with the page
not locked.
->bmap() is currently used by legacy ioctl() (FIBMAP) provided by some
filesystems and by the swapper. The latter will eventually go away. All
instances do not actually need the BKL. Please, keep it that way and don't
breed new callers.
filesystems and by the swapper. The latter will eventually go away. Please,
keep it that way and don't breed new callers.
->invalidatepage() is called when the filesystem must attempt to drop
some or all of the buffers from the page when it is being truncated. It
@ -299,47 +322,37 @@ cleaned, or an error value if not. Note that in order to prevent the page
getting mapped back in and redirtied, it needs to be kept locked
across the entire operation.
Note: currently almost all instances of address_space methods are
using BKL for internal serialization and that's one of the worst sources
of contention. Normally they are calling library functions (in fs/buffer.c)
and pass foo_get_block() as a callback (on local block-based filesystems,
indeed). BKL is not needed for library stuff and is usually taken by
foo_get_block(). It's an overkill, since block bitmaps can be protected by
internal fs locking and real critical areas are much smaller than the areas
filesystems protect now.
----------------------- file_lock_operations ------------------------------
prototypes:
void (*fl_insert)(struct file_lock *); /* lock insertion callback */
void (*fl_remove)(struct file_lock *); /* lock removal callback */
void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
void (*fl_release_private)(struct file_lock *);
locking rules:
BKL may block
fl_insert: yes no
fl_remove: yes no
fl_copy_lock: yes no
fl_release_private: yes yes
file_lock_lock may block
fl_copy_lock: yes no
fl_release_private: maybe no
----------------------- lock_manager_operations ---------------------------
prototypes:
int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
void (*fl_notify)(struct file_lock *); /* unblock callback */
int (*fl_grant)(struct file_lock *, struct file_lock *, int);
void (*fl_release_private)(struct file_lock *);
void (*fl_break)(struct file_lock *); /* break_lease callback */
int (*fl_mylease)(struct file_lock *, struct file_lock *);
int (*fl_change)(struct file_lock **, int);
locking rules:
BKL may block
fl_compare_owner: yes no
fl_notify: yes no
fl_release_private: yes yes
fl_break: yes no
file_lock_lock may block
fl_compare_owner: yes no
fl_notify: yes no
fl_grant: no no
fl_release_private: maybe no
fl_break: yes no
fl_mylease: yes no
fl_change yes no
Currently only NFSD and NLM provide instances of this class. None of
them block. If you have out-of-tree instances - please, show up. Locking
in that area will change.
--------------------------- buffer_head -----------------------------------
prototypes:
void (*b_end_io)(struct buffer_head *bh, int uptodate);
@ -364,17 +377,17 @@ prototypes:
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
locking rules:
BKL bd_mutex
open: no yes
release: no yes
ioctl: no no
compat_ioctl: no no
direct_access: no no
media_changed: no no
unlock_native_capacity: no no
revalidate_disk: no no
getgeo: no no
swap_slot_free_notify: no no (see below)
bd_mutex
open: yes
release: yes
ioctl: no
compat_ioctl: no
direct_access: no
media_changed: no
unlock_native_capacity: no
revalidate_disk: no
getgeo: no
swap_slot_free_notify: no (see below)
media_changed, unlock_native_capacity and revalidate_disk are called only from
check_disk_change().
@ -413,34 +426,21 @@ prototypes:
unsigned long (*get_unmapped_area)(struct file *, unsigned long,
unsigned long, unsigned long, unsigned long);
int (*check_flags)(int);
int (*flock) (struct file *, int, struct file_lock *);
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *,
size_t, unsigned int);
ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *,
size_t, unsigned int);
int (*setlease)(struct file *, long, struct file_lock **);
};
locking rules:
All may block.
BKL
llseek: no (see below)
read: no
aio_read: no
write: no
aio_write: no
readdir: no
poll: no
unlocked_ioctl: no
compat_ioctl: no
mmap: no
open: no
flush: no
release: no
fsync: no (see below)
aio_fsync: no
fasync: no
lock: yes
readv: no
writev: no
sendfile: no
sendpage: no
get_unmapped_area: no
check_flags: no
All may block except for ->setlease.
No VFS locks held on entry except for ->fsync and ->setlease.
->fsync() has i_mutex on inode.
->setlease has the file_list_lock held and must not sleep.
->llseek() locking has moved from llseek to the individual llseek
implementations. If your fs is not using generic_file_llseek, you
@ -450,17 +450,10 @@ mutex or just to use i_size_read() instead.
Note: this does not protect the file->f_pos against concurrent modifications
since this is something the userspace has to take care about.
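A minimal sketch of a filesystem taking i_mutex in its own ->llseek() (foo_llseek is a hypothetical name; generic_file_llseek_unlocked was the 2.6.37-era helper for exactly this pattern):

static loff_t foo_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;
	loff_t ret;

	mutex_lock(&inode->i_mutex);	/* serialize against i_size updates */
	ret = generic_file_llseek_unlocked(file, offset, origin);
	mutex_unlock(&inode->i_mutex);
	return ret;
}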
Note: ext2_release() was *the* source of contention on fs-intensive
loads and dropping BKL on ->release() helps to get rid of that (we still
grab BKL for cases when we close a file that had been opened r/w, but that
can and should be done using the internal locking with smaller critical areas).
Current worst offender is ext2_get_block()...
->fasync() is called without BKL protection, and is responsible for
maintaining the FASYNC bit in filp->f_flags. Most instances call
fasync_helper(), which does that maintenance, so it's not normally
something one needs to worry about. Return values > 0 will be mapped to
zero in the VFS layer.
->fasync() is responsible for maintaining the FASYNC bit in filp->f_flags.
Most instances call fasync_helper(), which does that maintenance, so it's
not normally something one needs to worry about. Return values > 0 will be
mapped to zero in the VFS layer.
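A minimal ->fasync() sketch built on fasync_helper(), which does the FASYNC bookkeeping described above (foo_fasync and the foo_async queue are hypothetical names):

static struct fasync_struct *foo_async;

static int foo_fasync(int fd, struct file *filp, int on)
{
	/* fasync_helper() maintains FASYNC in filp->f_flags for us */
	return fasync_helper(fd, filp, on, &foo_async);
}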
->readdir() and ->ioctl() on directories must be changed. Ideally we would
move ->readdir() to inode_operations and use a separate method for directory
@ -471,8 +464,6 @@ components. And there are other reasons why the current interface is a mess...
->read on directories probably must go away - we should just enforce -EISDIR
in sys_read() and friends.
->fsync() has i_mutex on inode.
--------------------------- dquot_operations -------------------------------
prototypes:
int (*write_dquot) (struct dquot *);
@ -507,12 +498,12 @@ prototypes:
int (*access)(struct vm_area_struct *, unsigned long, void*, int, int);
locking rules:
BKL mmap_sem PageLocked(page)
open: no yes
close: no yes
fault: no yes can return with page locked
page_mkwrite: no yes can return with page locked
access: no yes
mmap_sem PageLocked(page)
open: yes
close: yes
fault: yes can return with page locked
page_mkwrite: yes can return with page locked
access: yes
->fault() is called when a previously not present pte is about
to be faulted in. The filesystem must find and return the page associated
@ -539,6 +530,3 @@ VM_IO | VM_PFNMAP VMAs.
(if you break something or notice that it is broken and do not fix it yourself
- at least put it here)
ipc/shm.c::shm_delete() - may need BKL.
->read() and ->write() in many drivers are (probably) missing BKL.


@ -1761,7 +1761,7 @@ and is between 256 and 4096 characters. It is defined in the file
nousb [USB] Disable the USB subsystem
nowatchdog [KNL] Disable the lockup detector.
nowatchdog [KNL] Disable the lockup detector (NMI watchdog).
nowb [ARM]
@ -2177,11 +2177,6 @@ and is between 256 and 4096 characters. It is defined in the file
reset_devices [KNL] Force drivers to reset the underlying device
during initialization.
resource_alloc_from_bottom
Allocate new resources from the beginning of available
space, not the end. If you need to use this, please
report a bug.
resume= [SWSUSP]
Specify the partition device for software suspend


@ -379,8 +379,8 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
zero)
bool pm_runtime_suspended(struct device *dev);
- return true if the device's runtime PM status is 'suspended', or false
otherwise
- return true if the device's runtime PM status is 'suspended' and its
'power.disable_depth' field is equal to zero, or false otherwise
void pm_runtime_allow(struct device *dev);
- set the power.runtime_auto flag for the device and decrease its usage
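As a usage sketch for pm_runtime_suspended() described above (foo_flush() and foo_write_hw() are hypothetical names), a driver might skip hardware access while the device is known to be powered down:

static int foo_flush(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;	/* status 'suspended' and disable_depth == 0 */
	return foo_write_hw(dev);	/* device not known to be suspended */
}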


@ -1044,9 +1044,9 @@ Details:
/**
* queuecommand - queue scsi command, invoke 'done' on completion
* queuecommand - queue scsi command, invoke scp->scsi_done on completion
* @shost: pointer to the scsi host object
* @scp: pointer to scsi command object
* @done: function pointer to be invoked on completion
*
* Returns 0 on success.
*
@ -1074,42 +1074,45 @@ Details:
*
* Other types of errors that are detected immediately may be
* flagged by setting scp->result to an appropriate value,
* invoking the 'done' callback, and then returning 0 from this
* function. If the command is not performed immediately (and the
* LLD is starting (or will start) the given command) then this
* function should place 0 in scp->result and return 0.
* invoking the scp->scsi_done callback, and then returning 0
* from this function. If the command is not performed
* immediately (and the LLD is starting (or will start) the given
* command) then this function should place 0 in scp->result and
* return 0.
*
* Command ownership. If the driver returns zero, it owns the
* command and must take responsibility for ensuring the 'done'
* callback is executed. Note: the driver may call done before
* returning zero, but after it has called done, it may not
* return any value other than zero. If the driver makes a
* non-zero return, it must not execute the command's done
* callback at any time.
* command and must take responsibility for ensuring the
* scp->scsi_done callback is executed. Note: the driver may
* call scp->scsi_done before returning zero, but after it has
* called scp->scsi_done, it may not return any value other than
* zero. If the driver makes a non-zero return, it must not
* execute the command's scsi_done callback at any time.
*
* Locks: struct Scsi_Host::host_lock held on entry (with "irqsave")
* and is expected to be held on return.
* Locks: up to and including 2.6.36, struct Scsi_Host::host_lock
* held on entry (with "irqsave") and is expected to be
* held on return. From 2.6.37 onwards, queuecommand is
* called without any locks held.
*
* Calling context: in interrupt (soft irq) or process context
*
* Notes: This function should be relatively fast. Normally it will
* not wait for IO to complete. Hence the 'done' callback is invoked
* (often directly from an interrupt service routine) some time after
* this function has returned. In some cases (e.g. pseudo adapter
* drivers that manufacture the response to a SCSI INQUIRY)
* the 'done' callback may be invoked before this function returns.
* If the 'done' callback is not invoked within a certain period
* the SCSI mid level will commence error processing.
* If a status of CHECK CONDITION is placed in "result" when the
* 'done' callback is invoked, then the LLD driver should
* perform autosense and fill in the struct scsi_cmnd::sense_buffer
* Notes: This function should be relatively fast. Normally it
* will not wait for IO to complete. Hence the scp->scsi_done
* callback is invoked (often directly from an interrupt service
* routine) some time after this function has returned. In some
* cases (e.g. pseudo adapter drivers that manufacture the
* response to a SCSI INQUIRY) the scp->scsi_done callback may be
* invoked before this function returns. If the scp->scsi_done
* callback is not invoked within a certain period the SCSI mid
* level will commence error processing. If a status of CHECK
* CONDITION is placed in "result" when the scp->scsi_done
* callback is invoked, then the LLD driver should perform
* autosense and fill in the struct scsi_cmnd::sense_buffer
* array. The scsi_cmnd::sense_buffer array is zeroed prior to
* the mid level queuing a command to an LLD.
*
* Defined in: LLD
**/
int queuecommand(struct scsi_cmnd * scp,
void (*done)(struct scsi_cmnd *))
int queuecommand(struct Scsi_Host *shost, struct scsi_cmnd * scp)
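A hedged sketch of the new lockless entry point (the foo_* helpers are hypothetical; drivers not yet audited for lockless entry in 2.6.37 could keep the old host_lock behaviour via the DEF_SCSI_QCMD() wrapper):

static int foo_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scp)
{
	if (foo_hw_busy(shost))
		return SCSI_MLQUEUE_HOST_BUSY;	/* mid layer retries later */

	foo_start_io(scp);	/* completion path invokes scp->scsi_done(scp) */
	return 0;		/* zero return: the LLD now owns the command */
}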
/**


@ -373,9 +373,18 @@ EVENT_PROCESS:
print " $regex_lru_isolate/o\n";
next;
}
my $isolate_mode = $1;
my $nr_scanned = $4;
my $nr_contig_dirty = $7;
$perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned;
# To closer match vmstat scanning statistics, only count isolate_both
# and isolate_inactive as scanning. isolate_active is rotation
# isolate_inactive == 0
# isolate_active == 1
# isolate_both == 2
if ($isolate_mode != 1) {
$perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned;
}
$perprocesspid{$process_pid}->{HIGH_NR_CONTIG_DIRTY} += $nr_contig_dirty;
} elsif ($tracepoint eq "mm_vmscan_lru_shrink_inactive") {
$details = $5;


@ -405,7 +405,7 @@ S: Supported
F: drivers/usb/gadget/amd5536udc.*
AMD GEODE PROCESSOR/CHIPSET SUPPORT
P: Jordan Crouse
P: Andres Salomon <dilinger@queued.net>
L: linux-geode@lists.infradead.org (moderated for non-subscribers)
W: http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html
S: Supported
@ -792,11 +792,14 @@ S: Maintained
ARM/NOMADIK ARCHITECTURE
M: Alessandro Rubini <rubini@unipv.it>
M: Linus Walleij <linus.walleij@stericsson.com>
M: STEricsson <STEricsson_nomadik_linux@list.st.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-nomadik/
F: arch/arm/plat-nomadik/
F: drivers/i2c/busses/i2c-nomadik.c
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT
M: Nelson Castillo <arhuaco@freaks-unidos.net>
@ -998,12 +1001,24 @@ F: drivers/i2c/busses/i2c-stu300.c
F: drivers/rtc/rtc-coh901331.c
F: drivers/watchdog/coh901327_wdt.c
F: drivers/dma/coh901318*
F: drivers/mfd/ab3100*
F: drivers/rtc/rtc-ab3100.c
F: drivers/rtc/rtc-coh901331.c
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
ARM/U8500 ARM ARCHITECTURE
ARM/Ux500 ARM ARCHITECTURE
M: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
M: Linus Walleij <linus.walleij@stericsson.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-ux500/
F: drivers/dma/ste_dma40*
F: drivers/mfd/ab3550*
F: drivers/mfd/abx500*
F: drivers/mfd/ab8500*
F: drivers/mfd/stmpe*
F: drivers/rtc/rtc-ab8500.c
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
ARM/VFP SUPPORT
M: Russell King <linux@arm.linux.org.uk>
@ -4590,7 +4605,7 @@ F: drivers/pcmcia/
F: include/pcmcia/
PCNET32 NETWORK DRIVER
M: Don Fry <pcnet32@verizon.net>
M: Don Fry <pcnet32@frontier.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/pcnet32.c


@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 37
EXTRAVERSION = -rc6
EXTRAVERSION =
NAME = Flesh-Eating Bats with Fangs
# *DOCUMENTATION*


@ -352,3 +352,4 @@ struct pci_bus * __init it8152_pci_scan_bus(int nr, struct pci_sys_data *sys)
return pci_scan_bus(nr, &it8152_ops, sys);
}
EXPORT_SYMBOL(dma_set_coherent_mask);


@ -76,6 +76,7 @@ extern unsigned long it8152_base_address;
IT8152_PD_IRQ(0) Audio controller (ACR)
*/
#define IT8152_IRQ(x) (IRQ_BOARD_START + (x))
#define IT8152_LAST_IRQ (IRQ_BOARD_START + 40)
/* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
#define IT8152_LD_IRQ_COUNT 9


@ -25,9 +25,6 @@ extern void *kmap_high(struct page *page);
extern void *kmap_high_get(struct page *page);
extern void kunmap_high(struct page *page);
extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
/*
* The following functions are already defined by <linux/highmem.h>
* when CONFIG_HIGHMEM is not set.


@ -13,9 +13,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* DO NOT EDIT!! - this file automatically generated
* from .s file by awk -f s2h.awk
*/
/* Size definitions
* Copyright (C) ARM Limited 1998. All rights reserved.
*/
@ -25,6 +22,9 @@
/* handy sizes */
#define SZ_16 0x00000010
#define SZ_32 0x00000020
#define SZ_64 0x00000040
#define SZ_128 0x00000080
#define SZ_256 0x00000100
#define SZ_512 0x00000200


@ -150,6 +150,7 @@ extern unsigned int user_debug;
#define rmb() dmb()
#define wmb() mb()
#else
#include <asm/memory.h>
#define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)


@ -29,6 +29,9 @@ ret_fast_syscall:
ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK
bne fast_work_pending
#if defined(CONFIG_IRQSOFF_TRACER)
asm_trace_hardirqs_on
#endif
/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
@ -65,6 +68,9 @@ ret_slow_syscall:
tst r1, #_TIF_WORK_MASK
bne work_pending
no_work_pending:
#if defined(CONFIG_IRQSOFF_TRACER)
asm_trace_hardirqs_on
#endif
/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr


@ -310,7 +310,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
* All kernel threads share the same mm context; grab a
* reference and switch to it.
*/
atomic_inc(&mm->mm_users);
atomic_inc(&mm->mm_count);
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));


@ -65,7 +65,7 @@ obj-$(CONFIG_MACH_AT91SAM9G20EK) += board-sam9g20ek.o
obj-$(CONFIG_MACH_CPU9G20) += board-cpu9krea.o
obj-$(CONFIG_MACH_STAMP9G20) += board-stamp9g20.o
obj-$(CONFIG_MACH_PORTUXG20) += board-stamp9g20.o
obj-$(CONFIG_MACH_PCONTROL_G20) += board-pcontrol-g20.o
obj-$(CONFIG_MACH_PCONTROL_G20) += board-pcontrol-g20.o board-stamp9g20.o
# AT91SAM9260/AT91SAM9G20 board-specific support
obj-$(CONFIG_MACH_SNAPPER_9260) += board-snapper9260.o


@ -31,6 +31,7 @@
#include <mach/board.h>
#include <mach/at91sam9_smc.h>
#include <mach/stamp9g20.h>
#include "sam9_smc.h"
#include "generic.h"
@ -38,11 +39,7 @@
static void __init pcontrol_g20_map_io(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91sam9260_initialize(18432000);
/* DGBU on ttyS0. (Rx, Tx) only TTL -> JTAG connector X7 17,19 ) */
at91_register_uart(0, 0, 0);
stamp9g20_map_io();
/* USART0 on ttyS1. (Rx, Tx, CTS, RTS) piggyback A2 */
at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS
@ -54,9 +51,6 @@ static void __init pcontrol_g20_map_io(void)
/* USART2 on ttyS3. (Rx, Tx) 9bit-Bus Multidrop-mode X4 */
at91_register_uart(AT91SAM9260_ID_US4, 3, 0);
/* set serial console to ttyS0 (ie, DBGU) */
at91_set_serial_console(0);
}
@ -66,38 +60,6 @@ static void __init init_irq(void)
}
/*
* NAND flash 512MiB 1,8V 8-bit, sector size 128 KiB
*/
static struct atmel_nand_data __initdata nand_data = {
.ale = 21,
.cle = 22,
.rdy_pin = AT91_PIN_PC13,
.enable_pin = AT91_PIN_PC14,
};
/*
* Bus timings; unit = 7.57ns
*/
static struct sam9_smc_config __initdata nand_smc_config = {
.ncs_read_setup = 0,
.nrd_setup = 2,
.ncs_write_setup = 0,
.nwe_setup = 2,
.ncs_read_pulse = 4,
.nrd_pulse = 4,
.ncs_write_pulse = 4,
.nwe_pulse = 4,
.read_cycle = 7,
.write_cycle = 7,
.mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE
| AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_DBW_8,
.tdf_cycles = 3,
};
static struct sam9_smc_config __initdata pcontrol_smc_config[2] = { {
.ncs_read_setup = 16,
.nrd_setup = 18,
@ -138,14 +100,6 @@ static struct sam9_smc_config __initdata pcontrol_smc_config[2] = { {
.tdf_cycles = 1,
} };
static void __init add_device_nand(void)
{
/* configure chip-select 3 (NAND) */
sam9_smc_configure(3, &nand_smc_config);
at91_add_device_nand(&nand_data);
}
static void __init add_device_pcontrol(void)
{
/* configure chip-select 4 (IO compatible to 8051 X4 ) */
@ -155,23 +109,6 @@ static void __init add_device_pcontrol(void)
}
/*
* MCI (SD/MMC)
* det_pin, wp_pin and vcc_pin are not connected
*/
#if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE)
static struct mci_platform_data __initdata mmc_data = {
.slot[0] = {
.bus_width = 4,
},
};
#else
static struct at91_mmc_data __initdata mmc_data = {
.wire4 = 1,
};
#endif
/*
* USB Host port
*/
@ -265,42 +202,13 @@ static struct spi_board_info pcontrol_g20_spi_devices[] = {
};
/*
* Dallas 1-Wire DS2431
*/
static struct w1_gpio_platform_data w1_gpio_pdata = {
.pin = AT91_PIN_PA29,
.is_open_drain = 1,
};
static struct platform_device w1_device = {
.name = "w1-gpio",
.id = -1,
.dev.platform_data = &w1_gpio_pdata,
};
static void add_wire1(void)
{
at91_set_GPIO_periph(w1_gpio_pdata.pin, 1);
at91_set_multi_drive(w1_gpio_pdata.pin, 1);
platform_device_register(&w1_device);
}
static void __init pcontrol_g20_board_init(void)
{
at91_add_device_serial();
add_device_nand();
#if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE)
at91_add_device_mci(0, &mmc_data);
#else
at91_add_device_mmc(0, &mmc_data);
#endif
stamp9g20_board_init();
at91_add_device_usbh(&usbh_data);
at91_add_device_eth(&macb_data);
at91_add_device_i2c(pcontrol_g20_i2c_devices,
ARRAY_SIZE(pcontrol_g20_i2c_devices));
add_wire1();
add_device_pcontrol();
at91_add_device_spi(pcontrol_g20_spi_devices,
ARRAY_SIZE(pcontrol_g20_spi_devices));


@ -32,7 +32,7 @@
#include "generic.h"
static void __init portuxg20_map_io(void)
void __init stamp9g20_map_io(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91sam9260_initialize(18432000);
@ -40,6 +40,24 @@ static void __init portuxg20_map_io(void)
/* DGBU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
/* set serial console to ttyS0 (ie, DBGU) */
at91_set_serial_console(0);
}
static void __init stamp9g20evb_map_io(void)
{
stamp9g20_map_io();
/* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
| ATMEL_UART_DTR | ATMEL_UART_DSR
| ATMEL_UART_DCD | ATMEL_UART_RI);
}
static void __init portuxg20_map_io(void)
{
stamp9g20_map_io();
/* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
| ATMEL_UART_DTR | ATMEL_UART_DSR
@ -56,26 +74,6 @@ static void __init portuxg20_map_io(void)
/* USART5 on ttyS6. (Rx, Tx only) */
at91_register_uart(AT91SAM9260_ID_US5, 6, 0);
/* set serial console to ttyS0 (ie, DBGU) */
at91_set_serial_console(0);
}
static void __init stamp9g20_map_io(void)
{
/* Initialize processor: 18.432 MHz crystal */
at91sam9260_initialize(18432000);
/* DGBU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
/* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
| ATMEL_UART_DTR | ATMEL_UART_DSR
| ATMEL_UART_DCD | ATMEL_UART_RI);
/* set serial console to ttyS0 (ie, DBGU) */
at91_set_serial_console(0);
}
static void __init init_irq(void)
@ -156,7 +154,7 @@ static struct at91_udc_data __initdata portuxg20_udc_data = {
.pullup_pin = 0, /* pull-up driven by UDC */
};
static struct at91_udc_data __initdata stamp9g20_udc_data = {
static struct at91_udc_data __initdata stamp9g20evb_udc_data = {
.vbus_pin = AT91_PIN_PA22,
.pullup_pin = 0, /* pull-up driven by UDC */
};
@ -190,7 +188,7 @@ static struct gpio_led portuxg20_leds[] = {
}
};
static struct gpio_led stamp9g20_leds[] = {
static struct gpio_led stamp9g20evb_leds[] = {
{
.name = "D8",
.gpio = AT91_PIN_PB18,
@ -250,7 +248,7 @@ void add_w1(void)
}
static void __init generic_board_init(void)
void __init stamp9g20_board_init(void)
{
/* Serial */
at91_add_device_serial();
@ -262,34 +260,40 @@ static void __init generic_board_init(void)
#else
at91_add_device_mmc(0, &mmc_data);
#endif
/* USB Host */
at91_add_device_usbh(&usbh_data);
/* Ethernet */
at91_add_device_eth(&macb_data);
/* I2C */
at91_add_device_i2c(NULL, 0);
/* W1 */
add_w1();
}
static void __init portuxg20_board_init(void)
{
generic_board_init();
/* SPI */
at91_add_device_spi(portuxg20_spi_devices, ARRAY_SIZE(portuxg20_spi_devices));
stamp9g20_board_init();
/* USB Host */
at91_add_device_usbh(&usbh_data);
/* USB Device */
at91_add_device_udc(&portuxg20_udc_data);
/* Ethernet */
at91_add_device_eth(&macb_data);
/* I2C */
at91_add_device_i2c(NULL, 0);
/* SPI */
at91_add_device_spi(portuxg20_spi_devices, ARRAY_SIZE(portuxg20_spi_devices));
/* LEDs */
at91_gpio_leds(portuxg20_leds, ARRAY_SIZE(portuxg20_leds));
}
static void __init stamp9g20_board_init(void)
static void __init stamp9g20evb_board_init(void)
{
generic_board_init();
stamp9g20_board_init();
/* USB Host */
at91_add_device_usbh(&usbh_data);
/* USB Device */
at91_add_device_udc(&stamp9g20_udc_data);
at91_add_device_udc(&stamp9g20evb_udc_data);
/* Ethernet */
at91_add_device_eth(&macb_data);
/* I2C */
at91_add_device_i2c(NULL, 0);
/* LEDs */
at91_gpio_leds(stamp9g20_leds, ARRAY_SIZE(stamp9g20_leds));
at91_gpio_leds(stamp9g20evb_leds, ARRAY_SIZE(stamp9g20evb_leds));
}
MACHINE_START(PORTUXG20, "taskit PortuxG20")
@ -305,7 +309,7 @@ MACHINE_START(STAMP9G20, "taskit Stamp9G20")
/* Maintainer: taskit GmbH */
.boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
.map_io = stamp9g20_map_io,
.map_io = stamp9g20evb_map_io,
.init_irq = init_irq,
.init_machine = stamp9g20_board_init,
.init_machine = stamp9g20evb_board_init,
MACHINE_END


@ -658,7 +658,7 @@ static void __init at91_upll_usbfs_clock_init(unsigned long main_clock)
/* Now set uhpck values */
uhpck.parent = &utmi_clk;
uhpck.pmc_mask = AT91SAM926x_PMC_UHP;
uhpck.rate_hz = utmi_clk.parent->rate_hz;
uhpck.rate_hz = utmi_clk.rate_hz;
uhpck.rate_hz /= 1 + ((at91_sys_read(AT91_PMC_USB) & AT91_PMC_OHCIUSBDIV) >> 8);
}


@ -74,6 +74,8 @@
#define AT91_MCI_TRTYP_BLOCK (0 << 19)
#define AT91_MCI_TRTYP_MULTIPLE (1 << 19)
#define AT91_MCI_TRTYP_STREAM (2 << 19)
#define AT91_MCI_TRTYP_SDIO_BYTE (4 << 19)
#define AT91_MCI_TRTYP_SDIO_BLOCK (5 << 19)
#define AT91_MCI_BLKR 0x18 /* Block Register */
#define AT91_MCI_BLKR_BCNT(n) ((0xffff & (n)) << 0) /* Block count */


@ -0,0 +1,7 @@
#ifndef __MACH_STAMP9G20_H
#define __MACH_STAMP9G20_H
void stamp9g20_map_io(void);
void stamp9g20_board_init(void);
#endif


@ -513,4 +513,4 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
EXPORT_SYMBOL(ixp4xx_pci_read);
EXPORT_SYMBOL(ixp4xx_pci_write);
EXPORT_SYMBOL(dma_set_coherent_mask);


@ -540,6 +540,7 @@ config MACH_ICONTROL
config ARCH_PXA_ESERIES
bool "PXA based Toshiba e-series PDAs"
select PXA25x
select FB_W100
config MACH_E330
bool "Toshiba e330"


@ -353,8 +353,8 @@ resume_turn_on_mmu:
@ Let us ensure we jump to resume_after_mmu only when the mcr above
@ actually took effect. They call it the "cpwait" operation.
mrc p15, 0, r1, c2, c0, 0 @ queue a dependency on CP15
sub pc, r2, r1, lsr #32 @ jump to virtual addr
mrc p15, 0, r0, c2, c0, 0 @ queue a dependency on CP15
sub pc, r2, r0, lsr #32 @ jump to virtual addr
nop
nop
nop


@ -28,9 +28,16 @@ config S3C2412_DMA
config S3C2412_PM
bool
select S3C2412_PM_SLEEP
help
Internal config node to apply S3C2412 power management
config S3C2412_PM_SLEEP
bool
help
Internal config node to apply sleep for S3C2412 power management.
Can be selected by other SoCs with a similar sleep procedure.
# Note, the S3C2412 IOtiming support is in plat-s3c24xx
config S3C2412_CPUFREQ


@ -14,7 +14,8 @@ obj-$(CONFIG_CPU_S3C2412) += irq.o
obj-$(CONFIG_CPU_S3C2412) += clock.o
obj-$(CONFIG_CPU_S3C2412) += gpio.o
obj-$(CONFIG_S3C2412_DMA) += dma.o
obj-$(CONFIG_S3C2412_PM) += pm.o sleep.o
obj-$(CONFIG_S3C2412_PM) += pm.o
obj-$(CONFIG_S3C2412_PM_SLEEP) += sleep.o
obj-$(CONFIG_S3C2412_CPUFREQ) += cpu-freq.o
# Machine support


@ -27,6 +27,7 @@ config S3C2416_DMA
config S3C2416_PM
bool
select S3C2412_PM_SLEEP
help
Internal config node to apply S3C2416 power management


@ -378,6 +378,12 @@ static struct max8998_regulator_data aquila_regulators[] = {
static struct max8998_platform_data aquila_max8998_pdata = {
.num_regulators = ARRAY_SIZE(aquila_regulators),
.regulators = aquila_regulators,
.buck1_set1 = S5PV210_GPH0(3),
.buck1_set2 = S5PV210_GPH0(4),
.buck2_set3 = S5PV210_GPH0(5),
.buck1_max_voltage1 = 1200000,
.buck1_max_voltage2 = 1200000,
.buck2_max_voltage = 1200000,
};
#endif


@ -518,6 +518,12 @@ static struct max8998_regulator_data goni_regulators[] = {
static struct max8998_platform_data goni_max8998_pdata = {
.num_regulators = ARRAY_SIZE(goni_regulators),
.regulators = goni_regulators,
.buck1_set1 = S5PV210_GPH0(3),
.buck1_set2 = S5PV210_GPH0(4),
.buck2_set3 = S5PV210_GPH0(5),
.buck1_max_voltage1 = 1200000,
.buck1_max_voltage2 = 1200000,
.buck2_max_voltage = 1200000,
};
#endif


@ -1,4 +1,5 @@
/*
* Copyright (C) 2010 Magnus Damm
* Copyright (C) 2008 Renesas Solutions Corp.
*
* This program is free software; you can redistribute it and/or modify
@ -14,24 +15,45 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <mach/hardware.h>
#include <mach/irqs.h>
#define INTCA_BASE 0xe6980000
#define INTFLGA_OFFS 0x00000018 /* accept pending interrupt */
#define INTEVTA_OFFS 0x00000020 /* vector number of accepted interrupt */
#define INTLVLA_OFFS 0x00000030 /* priority level of accepted interrupt */
#define INTLVLB_OFFS 0x00000034 /* previous priority level */
.macro disable_fiq
.endm
.macro get_irqnr_preamble, base, tmp
ldr \base, =INTFLGA
ldr \base, =INTCA_BASE
.endm
.macro arch_ret_to_user, tmp1, tmp2
.endm
.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
ldr \irqnr, [\base]
/* The single INTFLGA read access below results in the following:
*
* 1. INTLVLB is updated with old priority value from INTLVLA
* 2. Highest priority interrupt is accepted
* 3. INTLVLA is updated to contain priority of accepted interrupt
* 4. Accepted interrupt vector is stored in INTFLGA and INTEVTA
*/
ldr \irqnr, [\base, #INTFLGA_OFFS]
/* Restore INTLVLA with the value saved in INTLVLB.
* This is required to support interrupt priorities properly.
*/
ldrb \tmp, [\base, #INTLVLB_OFFS]
strb \tmp, [\base, #INTLVLA_OFFS]
/* Handle invalid vector number case */
cmp \irqnr, #0
beq 1000f
/* intevt to irq number */
/* Convert vector to irq number, same as the evt2irq() macro */
lsr \irqnr, \irqnr, #0x5
subs \irqnr, \irqnr, #16
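For reference, a sketch of the C-side equivalent of the two instructions above (matching the evt2irq() macro the comment refers to; the worked value is illustrative):

#define evt2irq(evt)	(((evt) >> 5) - 16)
/* e.g. INTEVTA vector 0x0200 -> (0x0200 >> 5) - 16 = irq 0 */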


@ -2,6 +2,6 @@
#define __ASM_MACH_VMALLOC_H
/* Vmalloc at ... - 0xe5ffffff */
#define VMALLOC_END 0xe6000000
#define VMALLOC_END 0xe6000000UL
#endif /* __ASM_MACH_VMALLOC_H */


@ -13,13 +13,9 @@
*/
#include <linux/init.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <plat/cache-feroceon-l2.h>
#include "mm.h"
/*
* Low-level cache maintenance operations.
@ -39,27 +35,30 @@
* between which we don't want to be preempted.
*/
static inline unsigned long l2_start_va(unsigned long paddr)
static inline unsigned long l2_get_va(unsigned long paddr)
{
#ifdef CONFIG_HIGHMEM
/*
* Let's do our own fixmap stuff in a minimal way here.
* Because range ops can't be done on physical addresses,
* we simply install a virtual mapping for it only for the
* TLB lookup to occur, hence no need to flush the untouched
* memory mapping. This is protected with the disabling of
* interrupts by the caller.
* memory mapping afterwards (note: a cache flush may happen
* in some circumstances depending on the path taken in kunmap_atomic).
*/
unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0);
local_flush_tlb_kernel_page(vaddr);
return vaddr + (paddr & ~PAGE_MASK);
void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT);
return (unsigned long)vaddr + (paddr & ~PAGE_MASK);
#else
return __phys_to_virt(paddr);
#endif
}
static inline void l2_put_va(unsigned long vaddr)
{
#ifdef CONFIG_HIGHMEM
kunmap_atomic((void *)vaddr);
#endif
}
static inline void l2_clean_pa(unsigned long addr)
{
__asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
@ -76,13 +75,14 @@ static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
*/
BUG_ON((start ^ end) >> PAGE_SHIFT);
raw_local_irq_save(flags);
va_start = l2_start_va(start);
va_start = l2_get_va(start);
va_end = va_start + (end - start);
raw_local_irq_save(flags);
__asm__("mcr p15, 1, %0, c15, c9, 4\n\t"
"mcr p15, 1, %1, c15, c9, 5"
: : "r" (va_start), "r" (va_end));
raw_local_irq_restore(flags);
l2_put_va(va_start);
}
static inline void l2_clean_inv_pa(unsigned long addr)
@ -106,13 +106,14 @@ static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
*/
BUG_ON((start ^ end) >> PAGE_SHIFT);
raw_local_irq_save(flags);
va_start = l2_start_va(start);
va_start = l2_get_va(start);
va_end = va_start + (end - start);
raw_local_irq_save(flags);
__asm__("mcr p15, 1, %0, c15, c11, 4\n\t"
"mcr p15, 1, %1, c15, c11, 5"
: : "r" (va_start), "r" (va_end));
raw_local_irq_restore(flags);
l2_put_va(va_start);
}
static inline void l2_inv_all(void)


@ -17,14 +17,10 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/highmem.h>
#include <asm/system.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "mm.h"
#define CR_L2 (1 << 26)
@ -71,16 +67,15 @@ static inline void xsc3_l2_inv_all(void)
dsb();
}
static inline void l2_unmap_va(unsigned long va)
{
#ifdef CONFIG_HIGHMEM
#define l2_map_save_flags(x) raw_local_save_flags(x)
#define l2_map_restore_flags(x) raw_local_irq_restore(x)
#else
#define l2_map_save_flags(x) ((x) = 0)
#define l2_map_restore_flags(x) ((void)(x))
if (va != -1)
kunmap_atomic((void *)va);
#endif
}
static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
unsigned long flags)
static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va)
{
#ifdef CONFIG_HIGHMEM
unsigned long va = prev_va & PAGE_MASK;
@ -89,17 +84,10 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
/*
* Switching to a new page. Because cache ops are
* using virtual addresses only, we must put a mapping
* in place for it. We also enable interrupts for a
* short while and disable them again to protect this
* mapping.
* in place for it.
*/
unsigned long idx;
raw_local_irq_restore(flags);
idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
va = __fix_to_virt(FIX_KMAP_BEGIN + idx);
raw_local_irq_restore(flags | PSR_I_BIT);
set_pte_ext(TOP_PTE(va), pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL), 0);
local_flush_tlb_kernel_page(va);
l2_unmap_va(prev_va);
va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
}
return va + (pa_offset >> (32 - PAGE_SHIFT));
#else
@ -109,7 +97,7 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
{
unsigned long vaddr, flags;
unsigned long vaddr;
if (start == 0 && end == -1ul) {
xsc3_l2_inv_all();
@ -117,13 +105,12 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
}
vaddr = -1; /* to force the first mapping */
l2_map_save_flags(flags);
/*
* Clean and invalidate partial first cache line.
*/
if (start & (CACHE_LINE_SIZE - 1)) {
vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr, flags);
vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
xsc3_l2_clean_mva(vaddr);
xsc3_l2_inv_mva(vaddr);
start = (start | (CACHE_LINE_SIZE - 1)) + 1;
@ -133,7 +120,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
* Invalidate all full cache lines between 'start' and 'end'.
*/
while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
vaddr = l2_map_va(start, vaddr, flags);
vaddr = l2_map_va(start, vaddr);
xsc3_l2_inv_mva(vaddr);
start += CACHE_LINE_SIZE;
}
@ -142,31 +129,30 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
* Clean and invalidate partial last cache line.
*/
if (start < end) {
vaddr = l2_map_va(start, vaddr, flags);
vaddr = l2_map_va(start, vaddr);
xsc3_l2_clean_mva(vaddr);
xsc3_l2_inv_mva(vaddr);
}
l2_map_restore_flags(flags);
l2_unmap_va(vaddr);
dsb();
}
static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
{
unsigned long vaddr, flags;
unsigned long vaddr;
vaddr = -1; /* to force the first mapping */
l2_map_save_flags(flags);
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
vaddr = l2_map_va(start, vaddr, flags);
vaddr = l2_map_va(start, vaddr);
xsc3_l2_clean_mva(vaddr);
start += CACHE_LINE_SIZE;
}
l2_map_restore_flags(flags);
l2_unmap_va(vaddr);
dsb();
}
@ -193,7 +179,7 @@ static inline void xsc3_l2_flush_all(void)
static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
{
unsigned long vaddr, flags;
unsigned long vaddr;
if (start == 0 && end == -1ul) {
xsc3_l2_flush_all();
@ -201,17 +187,16 @@ static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
}
vaddr = -1; /* to force the first mapping */
l2_map_save_flags(flags);
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
vaddr = l2_map_va(start, vaddr, flags);
vaddr = l2_map_va(start, vaddr);
xsc3_l2_clean_mva(vaddr);
xsc3_l2_inv_mva(vaddr);
start += CACHE_LINE_SIZE;
}
l2_map_restore_flags(flags);
l2_unmap_va(vaddr);
dsb();
}


@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <asm/memory.h>
#include <asm/highmem.h>
@ -480,10 +481,10 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
op(vaddr, len, dir);
kunmap_high(page);
} else if (cache_is_vipt()) {
pte_t saved_pte;
vaddr = kmap_high_l1_vipt(page, &saved_pte);
/* unmapped pages might still be cached */
vaddr = kmap_atomic(page);
op(vaddr + offset, len, dir);
kunmap_high_l1_vipt(page, saved_pte);
kunmap_atomic(vaddr);
}
} else {
vaddr = page_address(page) + offset;


@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
@ -180,10 +181,10 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
kunmap_high(page);
} else if (cache_is_vipt()) {
pte_t saved_pte;
addr = kmap_high_l1_vipt(page, &saved_pte);
/* unmapped pages might still be cached */
addr = kmap_atomic(page);
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
kunmap_high_l1_vipt(page, saved_pte);
kunmap_atomic(addr);
}
}


@ -140,90 +140,3 @@ struct page *kmap_atomic_to_page(const void *ptr)
pte = TOP_PTE(vaddr);
return pte_page(*pte);
}
#ifdef CONFIG_CPU_CACHE_VIPT
#include <linux/percpu.h>
/*
* The VIVT cache of a highmem page is always flushed before the page
* is unmapped. Hence unmapped highmem pages need no cache maintenance
* in that case.
*
* However unmapped pages may still be cached with a VIPT cache, and
* it is not possible to perform cache maintenance on them using physical
* addresses unfortunately. So we have no choice but to set up a temporary
* virtual mapping for that purpose.
*
* Yet this VIPT cache maintenance may be triggered from DMA support
* functions which are possibly called from interrupt context. As we don't
* want to keep interrupt disabled all the time when such maintenance is
* taking place, we therefore allow for some reentrancy by preserving and
* restoring the previous fixmap entry before the interrupted context is
* resumed. If the reentrancy depth is 0 then there is no need to restore
* the previous fixmap, and leaving the current one in place allow it to
* be reused the next time without a TLB flush (common with DMA).
*/
static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
{
unsigned int idx, cpu;
int *depth;
unsigned long vaddr, flags;
pte_t pte, *ptep;
if (!in_interrupt())
preempt_disable();
cpu = smp_processor_id();
depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
ptep = TOP_PTE(vaddr);
pte = mk_pte(page, kmap_prot);
raw_local_irq_save(flags);
(*depth)++;
if (pte_val(*ptep) == pte_val(pte)) {
*saved_pte = pte;
} else {
*saved_pte = *ptep;
set_pte_ext(ptep, pte, 0);
local_flush_tlb_kernel_page(vaddr);
}
raw_local_irq_restore(flags);
return (void *)vaddr;
}
void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
{
unsigned int idx, cpu = smp_processor_id();
int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
unsigned long vaddr, flags;
pte_t pte, *ptep;
idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
ptep = TOP_PTE(vaddr);
pte = mk_pte(page, kmap_prot);
BUG_ON(pte_val(*ptep) != pte_val(pte));
BUG_ON(*depth <= 0);
raw_local_irq_save(flags);
(*depth)--;
if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
set_pte_ext(ptep, saved_pte, 0);
local_flush_tlb_kernel_page(vaddr);
}
raw_local_irq_restore(flags);
if (!in_interrupt())
preempt_enable();
}
#endif /* CONFIG_CPU_CACHE_VIPT */


@ -8,7 +8,7 @@ config PLAT_S3C24XX
default y
select NO_IOPORT
select ARCH_REQUIRE_GPIOLIB
select S3C_DEVICE_NAND
select S3C_DEV_NAND
select S3C_GPIO_CFG_S3C24XX
help
Base platform code for any Samsung S3C24XX device


@ -19,6 +19,8 @@ config MIPS
select GENERIC_ATOMIC64 if !64BIT
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_PROBE
menu "Machine selection"
@ -1664,6 +1666,28 @@ config PAGE_SIZE_64KB
endchoice
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
range 13 64 if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_32KB
default "13" if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_32KB
range 12 64 if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_16KB
default "12" if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_16KB
range 11 64
default "11"
help
The kernel memory allocator divides physically contiguous memory
blocks into "zones", where each zone is a power of two number of
pages. This option selects the largest power of two that the kernel
keeps in the memory allocator. If you need to allocate very large
blocks of physically contiguous memory, then you may need to
increase this value.
This config option is actually maximum order plus one. For example,
a value of 11 means that the largest free memory block is 2^10 pages.
The page size is not necessarily 4KB. Keep this in mind
when choosing a value for this option.
config BOARD_SCACHE
bool
@ -1921,20 +1945,6 @@ config CPU_R4000_WORKAROUNDS
config CPU_R4400_WORKAROUNDS
bool
#
# Use the generic interrupt handling code in kernel/irq/:
#
config GENERIC_HARDIRQS
bool
default y
config GENERIC_IRQ_PROBE
bool
default y
config IRQ_PER_CPU
bool
#
# - Highmem only makes sense for the 32-bit kernel.
# - The current highmem code will only work properly on physically indexed


@ -27,6 +27,7 @@
static void alchemy_8250_pm(struct uart_port *port, unsigned int state,
unsigned int old_state)
{
#ifdef CONFIG_SERIAL_8250
switch (state) {
case 0:
if ((__raw_readl(port->membase + UART_MOD_CNTRL) & 3) != 3) {
@ -49,6 +50,7 @@ static void alchemy_8250_pm(struct uart_port *port, unsigned int state,
serial8250_do_pm(port, state, old_state);
break;
}
#endif
}
#define PORT(_base, _irq) \


@ -54,10 +54,9 @@ void __init prom_init(void)
prom_init_cmdline();
memsize_str = prom_getenv("memsize");
if (!memsize_str)
if (!memsize_str || strict_strtoul(memsize_str, 0, &memsize))
memsize = ALCHEMY_BOARD_DEFAULT_MEMSIZE;
else
strict_strtoul(memsize_str, 0, &memsize);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}


@ -239,12 +239,12 @@ static void tnetd7300_set_clock(u32 shift, struct tnetd7300_clock *clock,
calculate(base_clock, frequency, &prediv, &postdiv, &mul);
writel(((prediv - 1) << PREDIV_SHIFT) | (postdiv - 1), &clock->ctrl);
msleep(1);
mdelay(1);
writel(4, &clock->pll);
while (readl(&clock->pll) & PLL_STATUS)
;
writel(((mul - 1) << MUL_SHIFT) | (0xff << 3) | 0x0e, &clock->pll);
msleep(75);
mdelay(75);
}
static void __init tnetd7300_init_clocks(void)
@ -456,7 +456,7 @@ void clk_put(struct clk *clk)
}
EXPORT_SYMBOL(clk_put);
int __init ar7_init_clocks(void)
void __init ar7_init_clocks(void)
{
switch (ar7_chip_id()) {
case AR7_CHIP_7100:
@ -472,7 +472,4 @@ int __init ar7_init_clocks(void)
}
/* adjust vbus clock rate */
vbus_clk.rate = bus_clk.rate / 2;
return 0;
}
arch_initcall(ar7_init_clocks);


@ -30,6 +30,9 @@ void __init plat_time_init(void)
{
struct clk *cpu_clk;
/* Initialize ar7 clocks so the CPU clock frequency is correct */
ar7_init_clocks();
cpu_clk = clk_get(NULL, "cpu");
if (IS_ERR(cpu_clk)) {
printk(KERN_ERR "unable to get cpu clock\n");


@ -32,7 +32,6 @@
#include <asm/reboot.h>
#include <asm/time.h>
#include <bcm47xx.h>
#include <asm/fw/cfe/cfe_api.h>
#include <asm/mach-bcm47xx/nvram.h>
struct ssb_bus ssb_bcm47xx;
@ -57,68 +56,112 @@ static void bcm47xx_machine_halt(void)
cpu_relax();
}
static void str2eaddr(char *str, char *dest)
#define READ_FROM_NVRAM(_outvar, name, buf) \
if (nvram_getenv(name, buf, sizeof(buf)) >= 0)\
sprom->_outvar = simple_strtoul(buf, NULL, 0);
static void bcm47xx_fill_sprom(struct ssb_sprom *sprom)
{
int i = 0;
char buf[100];
u32 boardflags;
if (str == NULL) {
memset(dest, 0, 6);
return;
memset(sprom, 0, sizeof(struct ssb_sprom));
sprom->revision = 1; /* Fallback: Old hardware does not define this. */
READ_FROM_NVRAM(revision, "sromrev", buf);
if (nvram_getenv("il0macaddr", buf, sizeof(buf)) >= 0)
nvram_parse_macaddr(buf, sprom->il0mac);
if (nvram_getenv("et0macaddr", buf, sizeof(buf)) >= 0)
nvram_parse_macaddr(buf, sprom->et0mac);
if (nvram_getenv("et1macaddr", buf, sizeof(buf)) >= 0)
nvram_parse_macaddr(buf, sprom->et1mac);
READ_FROM_NVRAM(et0phyaddr, "et0phyaddr", buf);
READ_FROM_NVRAM(et1phyaddr, "et1phyaddr", buf);
READ_FROM_NVRAM(et0mdcport, "et0mdcport", buf);
READ_FROM_NVRAM(et1mdcport, "et1mdcport", buf);
READ_FROM_NVRAM(board_rev, "boardrev", buf);
READ_FROM_NVRAM(country_code, "ccode", buf);
READ_FROM_NVRAM(ant_available_a, "aa5g", buf);
READ_FROM_NVRAM(ant_available_bg, "aa2g", buf);
READ_FROM_NVRAM(pa0b0, "pa0b0", buf);
READ_FROM_NVRAM(pa0b1, "pa0b1", buf);
READ_FROM_NVRAM(pa0b2, "pa0b2", buf);
READ_FROM_NVRAM(pa1b0, "pa1b0", buf);
READ_FROM_NVRAM(pa1b1, "pa1b1", buf);
READ_FROM_NVRAM(pa1b2, "pa1b2", buf);
READ_FROM_NVRAM(pa1lob0, "pa1lob0", buf);
READ_FROM_NVRAM(pa1lob2, "pa1lob1", buf);
READ_FROM_NVRAM(pa1lob1, "pa1lob2", buf);
READ_FROM_NVRAM(pa1hib0, "pa1hib0", buf);
READ_FROM_NVRAM(pa1hib2, "pa1hib1", buf);
READ_FROM_NVRAM(pa1hib1, "pa1hib2", buf);
READ_FROM_NVRAM(gpio0, "wl0gpio0", buf);
READ_FROM_NVRAM(gpio1, "wl0gpio1", buf);
READ_FROM_NVRAM(gpio2, "wl0gpio2", buf);
READ_FROM_NVRAM(gpio3, "wl0gpio3", buf);
READ_FROM_NVRAM(maxpwr_bg, "pa0maxpwr", buf);
READ_FROM_NVRAM(maxpwr_al, "pa1lomaxpwr", buf);
READ_FROM_NVRAM(maxpwr_a, "pa1maxpwr", buf);
READ_FROM_NVRAM(maxpwr_ah, "pa1himaxpwr", buf);
READ_FROM_NVRAM(itssi_a, "pa1itssit", buf);
READ_FROM_NVRAM(itssi_bg, "pa0itssit", buf);
READ_FROM_NVRAM(tri2g, "tri2g", buf);
READ_FROM_NVRAM(tri5gl, "tri5gl", buf);
READ_FROM_NVRAM(tri5g, "tri5g", buf);
READ_FROM_NVRAM(tri5gh, "tri5gh", buf);
READ_FROM_NVRAM(rxpo2g, "rxpo2g", buf);
READ_FROM_NVRAM(rxpo5g, "rxpo5g", buf);
READ_FROM_NVRAM(rssisav2g, "rssisav2g", buf);
READ_FROM_NVRAM(rssismc2g, "rssismc2g", buf);
READ_FROM_NVRAM(rssismf2g, "rssismf2g", buf);
READ_FROM_NVRAM(bxa2g, "bxa2g", buf);
READ_FROM_NVRAM(rssisav5g, "rssisav5g", buf);
READ_FROM_NVRAM(rssismc5g, "rssismc5g", buf);
READ_FROM_NVRAM(rssismf5g, "rssismf5g", buf);
READ_FROM_NVRAM(bxa5g, "bxa5g", buf);
READ_FROM_NVRAM(cck2gpo, "cck2gpo", buf);
READ_FROM_NVRAM(ofdm2gpo, "ofdm2gpo", buf);
READ_FROM_NVRAM(ofdm5glpo, "ofdm5glpo", buf);
READ_FROM_NVRAM(ofdm5gpo, "ofdm5gpo", buf);
READ_FROM_NVRAM(ofdm5ghpo, "ofdm5ghpo", buf);
if (nvram_getenv("boardflags", buf, sizeof(buf)) >= 0) {
boardflags = simple_strtoul(buf, NULL, 0);
if (boardflags) {
sprom->boardflags_lo = (boardflags & 0x0000FFFFU);
sprom->boardflags_hi = (boardflags & 0xFFFF0000U) >> 16;
}
}
for (;;) {
dest[i++] = (char) simple_strtoul(str, NULL, 16);
str += 2;
if (!*str++ || i == 6)
break;
if (nvram_getenv("boardflags2", buf, sizeof(buf)) >= 0) {
boardflags = simple_strtoul(buf, NULL, 0);
if (boardflags) {
sprom->boardflags2_lo = (boardflags & 0x0000FFFFU);
sprom->boardflags2_hi = (boardflags & 0xFFFF0000U) >> 16;
}
}
}
static int bcm47xx_get_invariants(struct ssb_bus *bus,
struct ssb_init_invariants *iv)
{
char buf[100];
char buf[20];
/* Fill boardinfo structure */
memset(&(iv->boardinfo), 0 , sizeof(struct ssb_boardinfo));
if (cfe_getenv("boardvendor", buf, sizeof(buf)) >= 0 ||
nvram_getenv("boardvendor", buf, sizeof(buf)) >= 0)
if (nvram_getenv("boardvendor", buf, sizeof(buf)) >= 0)
iv->boardinfo.vendor = (u16)simple_strtoul(buf, NULL, 0);
else
iv->boardinfo.vendor = SSB_BOARDVENDOR_BCM;
if (nvram_getenv("boardtype", buf, sizeof(buf)) >= 0)
iv->boardinfo.type = (u16)simple_strtoul(buf, NULL, 0);
if (cfe_getenv("boardtype", buf, sizeof(buf)) >= 0 ||
nvram_getenv("boardtype", buf, sizeof(buf)) >= 0)
iv->boardinfo.type = (u16)simple_strtoul(buf, NULL, 0);
if (cfe_getenv("boardrev", buf, sizeof(buf)) >= 0 ||
nvram_getenv("boardrev", buf, sizeof(buf)) >= 0)
if (nvram_getenv("boardrev", buf, sizeof(buf)) >= 0)
iv->boardinfo.rev = (u16)simple_strtoul(buf, NULL, 0);
/* Fill sprom structure */
memset(&(iv->sprom), 0, sizeof(struct ssb_sprom));
iv->sprom.revision = 3;
bcm47xx_fill_sprom(&iv->sprom);
if (cfe_getenv("et0macaddr", buf, sizeof(buf)) >= 0 ||
nvram_getenv("et0macaddr", buf, sizeof(buf)) >= 0)
str2eaddr(buf, iv->sprom.et0mac);
if (cfe_getenv("et1macaddr", buf, sizeof(buf)) >= 0 ||
nvram_getenv("et1macaddr", buf, sizeof(buf)) >= 0)
str2eaddr(buf, iv->sprom.et1mac);
if (cfe_getenv("et0phyaddr", buf, sizeof(buf)) >= 0 ||
nvram_getenv("et0phyaddr", buf, sizeof(buf)) >= 0)
iv->sprom.et0phyaddr = simple_strtoul(buf, NULL, 0);
if (cfe_getenv("et1phyaddr", buf, sizeof(buf)) >= 0 ||
nvram_getenv("et1phyaddr", buf, sizeof(buf)) >= 0)
iv->sprom.et1phyaddr = simple_strtoul(buf, NULL, 0);
if (cfe_getenv("et0mdcport", buf, sizeof(buf)) >= 0 ||
nvram_getenv("et0mdcport", buf, sizeof(buf)) >= 0)
iv->sprom.et0mdcport = simple_strtoul(buf, NULL, 10);
if (cfe_getenv("et1mdcport", buf, sizeof(buf)) >= 0 ||
nvram_getenv("et1mdcport", buf, sizeof(buf)) >= 0)
iv->sprom.et1mdcport = simple_strtoul(buf, NULL, 10);
if (nvram_getenv("cardbus", buf, sizeof(buf)) >= 0)
iv->has_cardbus_slot = !!simple_strtoul(buf, NULL, 10);
return 0;
}
@ -126,12 +169,28 @@ static int bcm47xx_get_invariants(struct ssb_bus *bus,
void __init plat_mem_setup(void)
{
int err;
char buf[100];
struct ssb_mipscore *mcore;
err = ssb_bus_ssbbus_register(&ssb_bcm47xx, SSB_ENUM_BASE,
bcm47xx_get_invariants);
if (err)
panic("Failed to initialize SSB bus (err %d)\n", err);
mcore = &ssb_bcm47xx.mipscore;
if (nvram_getenv("kernel_args", buf, sizeof(buf)) >= 0) {
if (strstr(buf, "console=ttyS1")) {
struct ssb_serial_port port;
printk(KERN_DEBUG "Swapping serial ports!\n");
/* swap serial ports */
memcpy(&port, &mcore->serial_ports[0], sizeof(port));
memcpy(&mcore->serial_ports[0], &mcore->serial_ports[1],
sizeof(port));
memcpy(&mcore->serial_ports[1], &port, sizeof(port));
}
}
_machine_restart = bcm47xx_machine_restart;
_machine_halt = bcm47xx_machine_halt;
pm_power_off = bcm47xx_machine_halt;

View File

@ -111,8 +111,8 @@
* These are the PRID's for when 23:16 == PRID_COMP_BROADCOM
*/
#define PRID_IMP_BMIPS4KC 0x4000
#define PRID_IMP_BMIPS32 0x8000
#define PRID_IMP_BMIPS32_REV4 0x4000
#define PRID_IMP_BMIPS32_REV8 0x8000
#define PRID_IMP_BMIPS3300 0x9000
#define PRID_IMP_BMIPS3300_ALT 0x9100
#define PRID_IMP_BMIPS3300_BUG 0x0000

View File

@ -249,7 +249,8 @@ extern struct mips_abi mips_abi_n32;
#define SET_PERSONALITY(ex) \
do { \
set_personality(PER_LINUX); \
if (personality(current->personality) != PER_LINUX) \
set_personality(PER_LINUX); \
\
current->thread.abi = &mips_abi; \
} while (0)
@ -296,6 +297,8 @@ do { \
#define SET_PERSONALITY(ex) \
do { \
unsigned int p; \
\
clear_thread_flag(TIF_32BIT_REGS); \
clear_thread_flag(TIF_32BIT_ADDR); \
\
@ -304,7 +307,8 @@ do { \
else \
current->thread.abi = &mips_abi; \
\
if (current->personality != PER_LINUX32) \
p = personality(current->personality); \
if (p != PER_LINUX32 && p != PER_LINUX) \
set_personality(PER_LINUX); \
} while (0)

View File

@ -329,10 +329,14 @@ static inline void pfx##write##bwlq(type val, \
"dsrl32 %L0, %L0, 0" "\n\t" \
"dsll32 %M0, %M0, 0" "\n\t" \
"or %L0, %L0, %M0" "\n\t" \
".set push" "\n\t" \
".set noreorder" "\n\t" \
".set nomacro" "\n\t" \
"sd %L0, %2" "\n\t" \
".set pop" "\n\t" \
".set mips0" "\n" \
: "=r" (__tmp) \
: "0" (__val), "m" (*__mem)); \
: "0" (__val), "R" (*__mem)); \
if (irq) \
local_irq_restore(__flags); \
} else \
@ -355,12 +359,16 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
local_irq_save(__flags); \
__asm__ __volatile__( \
".set mips3" "\t\t# __readq" "\n\t" \
".set push" "\n\t" \
".set noreorder" "\n\t" \
".set nomacro" "\n\t" \
"ld %L0, %1" "\n\t" \
".set pop" "\n\t" \
"dsra32 %M0, %L0, 0" "\n\t" \
"sll %L0, %L0, 0" "\n\t" \
".set mips0" "\n" \
: "=r" (__val) \
: "m" (*__mem)); \
: "R" (*__mem)); \
if (irq) \
local_irq_restore(__flags); \
} else { \

View File

@ -201,7 +201,6 @@ static inline void ar7_device_off(u32 bit)
}
int __init ar7_gpio_init(void);
int __init ar7_gpio_init(void);
void __init ar7_init_clocks(void);
#endif /* __AR7_H__ */

View File

@ -12,6 +12,7 @@
#define __NVRAM_H
#include <linux/types.h>
#include <linux/kernel.h>
struct nvram_header {
u32 magic;
@ -36,4 +37,10 @@ struct nvram_header {
extern int nvram_getenv(char *name, char *val, size_t val_len);
static inline void nvram_parse_macaddr(char *buf, u8 *macaddr)
{
sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0], &macaddr[1],
&macaddr[2], &macaddr[3], &macaddr[4], &macaddr[5]);
}
#endif
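nvram_parse_macaddr() above leans entirely on sscanf()'s %hhx conversions. A minimal hosted sketch of the same parsing outside the kernel (the standalone main() and the uint8_t stand-in for u8 are illustrative assumptions):

#include <stdio.h>
#include <stdint.h>

/* Same parsing logic as nvram_parse_macaddr(), in hosted C. */
static void parse_macaddr(const char *buf, uint8_t *macaddr)
{
	sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0], &macaddr[1],
	       &macaddr[2], &macaddr[3], &macaddr[4], &macaddr[5]);
}

int main(void)
{
	uint8_t mac[6];

	parse_macaddr("00:11:22:33:44:55", mac);
	printf("%02x\n", mac[5]);	/* prints 55 */
	return 0;
}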

View File

@ -5,7 +5,7 @@
*
* Copyright (c) 2009 Qi Hardware inc.,
* Author: Xiangfu Liu <xiangfu@qi-hardware.com>
* Copyright 2010, Lars-Petrer Clausen <lars@metafoo.de>
* Copyright 2010, Lars-Peter Clausen <lars@metafoo.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 or later
@ -235,7 +235,7 @@ static const unsigned int qi_lb60_keypad_rows[] = {
QI_LB60_GPIO_KEYIN(3),
QI_LB60_GPIO_KEYIN(4),
QI_LB60_GPIO_KEYIN(5),
QI_LB60_GPIO_KEYIN(7),
QI_LB60_GPIO_KEYIN(6),
QI_LB60_GPIO_KEYIN8,
};

View File

@ -208,7 +208,7 @@ struct platform_device jz4740_i2s_device = {
/* PCM */
struct platform_device jz4740_pcm_device = {
.name = "jz4740-pcm",
.name = "jz4740-pcm-audio",
.id = -1,
};

View File

@ -23,7 +23,7 @@
#include <asm/bootinfo.h>
#include <asm/mach-jz4740/base.h>
void jz4740_init_cmdline(int argc, char *argv[])
static __init void jz4740_init_cmdline(int argc, char *argv[])
{
unsigned int count = COMMAND_LINE_SIZE - 1;
int i;

View File

@ -32,7 +32,7 @@ static int mips_next_event(unsigned long delta,
cnt = read_c0_count();
cnt += delta;
write_c0_compare(cnt);
res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0;
return res;
}
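The change from > to >= closes the case where the counter has already reached the freshly written compare value by the time it is read back; doing the comparison as a signed 32-bit difference also keeps the test correct across counter wraparound. A hosted sketch of that signed-difference test (the uint32_t stand-ins for the c0 registers and the sample values are assumptions):

#include <stdint.h>
#include <stdio.h>

/* Signed 32-bit difference tells us whether the counter already
 * reached the compare value, even across wraparound. */
static int already_passed(uint32_t count_now, uint32_t compare)
{
	return (int32_t)(count_now - compare) >= 0;
}

int main(void)
{
	printf("%d\n", already_passed(0xFFFFFFFFu, 0xFFFFFFFEu)); /* 1 -> -ETIME */
	printf("%d\n", already_passed(0x00000000u, 0xFFFFFFFEu)); /* 1, despite wrap */
	printf("%d\n", already_passed(0x10u, 0x20u));             /* 0 -> armed OK */
	return 0;
}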

View File

@ -905,7 +905,8 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
switch (c->processor_id & 0xff00) {
case PRID_IMP_BMIPS32:
case PRID_IMP_BMIPS32_REV4:
case PRID_IMP_BMIPS32_REV8:
c->cputype = CPU_BMIPS32;
__cpu_name[cpu] = "Broadcom BMIPS32";
break;
@ -933,10 +934,6 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "Broadcom BMIPS5000";
c->options |= MIPS_CPU_ULRI;
break;
case PRID_IMP_BMIPS4KC:
c->cputype = CPU_4KC;
__cpu_name[cpu] = "MIPS 4Kc";
break;
}
}

View File

@ -251,14 +251,15 @@ SYSCALL_DEFINE5(n32_msgrcv, int, msqid, u32, msgp, size_t, msgsz,
SYSCALL_DEFINE1(32_personality, unsigned long, personality)
{
unsigned int p = personality & 0xffffffff;
int ret;
personality &= 0xffffffff;
if (personality(current->personality) == PER_LINUX32 &&
personality == PER_LINUX)
personality = PER_LINUX32;
ret = sys_personality(personality);
if (ret == PER_LINUX32)
ret = PER_LINUX;
personality(p) == PER_LINUX)
p = (p & ~PER_MASK) | PER_LINUX32;
ret = sys_personality(p);
if (ret != -1 && personality(ret) == PER_LINUX32)
ret = (ret & ~PER_MASK) | PER_LINUX;
return ret;
}
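The rewritten syscall only swaps the low personality type bits and leaves the flag bits above PER_MASK untouched, which the old version clobbered. A small hosted illustration (the constants mirror <linux/personality.h>; the main() is hypothetical):

#include <stdio.h>

#define PER_LINUX	0x0000
#define PER_LINUX32	0x0008
#define PER_MASK	0x00ff

int main(void)
{
	/* ADDR_LIMIT_3GB-style flag bits live above PER_MASK */
	unsigned int p = PER_LINUX | 0x8000000;

	/* swap the personality type, keep the flag bits intact */
	p = (p & ~PER_MASK) | PER_LINUX32;
	printf("%#x\n", p);	/* 0x8000008 */
	return 0;
}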

View File

@ -142,7 +142,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
childregs->regs[7] = 0; /* Clear error flag */
childregs->regs[2] = 0; /* Child gets zero as return value */
regs->regs[2] = p->pid;
if (childregs->cp0_status & ST0_CU0) {
childregs->regs[28] = (unsigned long) ti;

View File

@ -100,7 +100,7 @@ void __init device_tree_init(void)
return;
base = virt_to_phys((void *)initial_boot_params);
size = initial_boot_params->totalsize;
size = be32_to_cpu(initial_boot_params->totalsize);
/* Before we do anything, lets reserve the dt blob */
reserve_mem_mach(base, size);

View File

@ -153,7 +153,7 @@ static void __cpuinit vsmp_init_secondary(void)
{
extern int gic_present;
/* This is Malta specific: IPI,performance and timer inetrrupts */
/* This is Malta specific: IPI,performance and timer interrupts */
if (gic_present)
change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
STATUSF_IP6 | STATUSF_IP7);

View File

@ -83,7 +83,8 @@ extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
struct mips_fpu_struct *ctx, int has_fpu);
struct mips_fpu_struct *ctx, int has_fpu,
void *__user *fault_addr);
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
@ -661,12 +662,36 @@ asmlinkage void do_ov(struct pt_regs *regs)
force_sig_info(SIGFPE, &info, current);
}
static int process_fpemu_return(int sig, void __user *fault_addr)
{
if (sig == SIGSEGV || sig == SIGBUS) {
struct siginfo si = {0};
si.si_addr = fault_addr;
si.si_signo = sig;
if (sig == SIGSEGV) {
if (find_vma(current->mm, (unsigned long)fault_addr))
si.si_code = SEGV_ACCERR;
else
si.si_code = SEGV_MAPERR;
} else {
si.si_code = BUS_ADRERR;
}
force_sig_info(sig, &si, current);
return 1;
} else if (sig) {
force_sig(sig, current);
return 1;
} else {
return 0;
}
}
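process_fpemu_return() centralizes signal delivery for every emulator exit: SIGSEGV/SIGBUS carry the faulting user address (with SEGV_ACCERR vs SEGV_MAPERR decided by whether a VMA covers it), any other non-zero signal is raised without an address, and zero means execution resumes. A hosted sketch of just that dispatch (signal delivery is stubbed out with printf; this is not the kernel implementation):

#include <signal.h>
#include <stdio.h>

/* Mirror of the dispatch in process_fpemu_return(): returns 1 when a
 * signal would be delivered, 0 when emulation finished cleanly. */
static int fpemu_return_sketch(int sig, const void *fault_addr)
{
	if (sig == SIGSEGV || sig == SIGBUS) {
		printf("deliver %d with si_addr=%p\n", sig, fault_addr);
		return 1;
	} else if (sig) {
		printf("deliver %d without an address\n", sig);
		return 1;
	}
	return 0;
}

int main(void)
{
	fpemu_return_sketch(SIGBUS, (const void *)0x1000); /* bad user pointer */
	fpemu_return_sketch(0, NULL);                      /* clean emulation */
	return 0;
}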
/*
* XXX Delayed fp exceptions when doing a lazy ctx switch XXX
*/
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
siginfo_t info;
siginfo_t info = {0};
if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE)
== NOTIFY_STOP)
@ -675,6 +700,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
if (fcr31 & FPU_CSR_UNI_X) {
int sig;
void __user *fault_addr = NULL;
/*
* Unimplemented operation exception. If we've got the full
@ -690,7 +716,8 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
lose_fpu(1);
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1);
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
/*
* We can't allow the emulated instruction to leave any of
@ -702,8 +729,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
own_fpu(1); /* Using the FPU again. */
/* If something went wrong, signal */
if (sig)
force_sig(sig, current);
process_fpemu_return(sig, fault_addr);
return;
} else if (fcr31 & FPU_CSR_INV_X)
@ -996,11 +1022,11 @@ asmlinkage void do_cpu(struct pt_regs *regs)
if (!raw_cpu_has_fpu) {
int sig;
void __user *fault_addr = NULL;
sig = fpu_emulator_cop1Handler(regs,
&current->thread.fpu, 0);
if (sig)
force_sig(sig, current);
else
&current->thread.fpu,
0, &fault_addr);
if (!process_fpemu_return(sig, fault_addr))
mt_ase_fp_affinity();
}

View File

@ -1092,6 +1092,10 @@ static int vpe_open(struct inode *inode, struct file *filp)
/* this of-course trashes what was there before... */
v->pbuffer = vmalloc(P_SIZE);
if (!v->pbuffer) {
pr_warning("VPE loader: unable to allocate memory\n");
return -ENOMEM;
}
v->plen = P_SIZE;
v->load_addr = NULL;
v->len = 0;
@ -1149,10 +1153,9 @@ static int vpe_release(struct inode *inode, struct file *filp)
if (ret < 0)
v->shared_ptr = NULL;
// cleanup any temp buffers
if (v->pbuffer)
vfree(v->pbuffer);
vfree(v->pbuffer);
v->plen = 0;
return ret;
}
@ -1169,11 +1172,6 @@ static ssize_t vpe_write(struct file *file, const char __user * buffer,
if (v == NULL)
return -ENODEV;
if (v->pbuffer == NULL) {
printk(KERN_ERR "VPE loader: no buffer for program\n");
return -ENOMEM;
}
if ((count + v->len) > v->plen) {
printk(KERN_WARNING
"VPE loader: elf size too big. Perhaps strip uneeded symbols\n");

View File

@ -161,16 +161,16 @@ FEXPORT(__bzero)
.Lfwd_fixup:
PTR_L t0, TI_TASK($28)
LONG_L t0, THREAD_BUADDR(t0)
andi a2, 0x3f
LONG_L t0, THREAD_BUADDR(t0)
LONG_ADDU a2, t1
jr ra
LONG_SUBU a2, t0
.Lpartial_fixup:
PTR_L t0, TI_TASK($28)
LONG_L t0, THREAD_BUADDR(t0)
andi a2, LONGMASK
LONG_L t0, THREAD_BUADDR(t0)
LONG_ADDU a2, t1
jr ra
LONG_SUBU a2, t0

View File

@ -29,9 +29,9 @@ unsigned long memsize, highmemsize;
#define parse_even_earlier(res, option, p) \
do { \
int ret; \
if (strncmp(option, (char *)p, strlen(option)) == 0) \
strict_strtol((char *)p + strlen(option"="), \
10, &res); \
ret = strict_strtol((char *)p + strlen(option"="), 10, &res); \
} while (0)
void __init prom_init_env(void)

View File

@ -64,7 +64,7 @@ static int fpu_emu(struct pt_regs *, struct mips_fpu_struct *,
#if __mips >= 4 && __mips != 32
static int fpux_emu(struct pt_regs *,
struct mips_fpu_struct *, mips_instruction);
struct mips_fpu_struct *, mips_instruction, void *__user *);
#endif
/* Further private data for which no space exists in mips_fpu_struct */
@ -208,16 +208,23 @@ static inline int cop1_64bit(struct pt_regs *xcp)
* Two instructions if the instruction is in a branch delay slot.
*/
static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
void *__user *fault_addr)
{
mips_instruction ir;
unsigned long emulpc, contpc;
unsigned int cond;
if (get_user(ir, (mips_instruction __user *) xcp->cp0_epc)) {
if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGBUS;
}
if (__get_user(ir, (mips_instruction __user *) xcp->cp0_epc)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGSEGV;
}
/* XXX NEC Vr54xx bug workaround */
if ((xcp->cp0_cause & CAUSEF_BD) && !isBranchInstr(&ir))
@ -245,10 +252,16 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
#endif
return SIGILL;
}
if (get_user(ir, (mips_instruction __user *) emulpc)) {
if (!access_ok(VERIFY_READ, emulpc, sizeof(mips_instruction))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)emulpc;
return SIGBUS;
}
if (__get_user(ir, (mips_instruction __user *) emulpc)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)emulpc;
return SIGSEGV;
}
/* __compute_return_epc() will have updated cp0_epc */
contpc = xcp->cp0_epc;
/* In order not to confuse ptrace() et al, tweak context */
@ -269,10 +282,17 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
u64 val;
MIPS_FPU_EMU_INC_STATS(loads);
if (get_user(val, va)) {
if (!access_ok(VERIFY_READ, va, sizeof(u64))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__get_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
DITOREG(val, MIPSInst_RT(ir));
break;
}
@ -284,10 +304,16 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
MIPS_FPU_EMU_INC_STATS(stores);
DIFROMREG(val, MIPSInst_RT(ir));
if (put_user(val, va)) {
if (!access_ok(VERIFY_WRITE, va, sizeof(u64))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__put_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
break;
}
@ -297,10 +323,16 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
u32 val;
MIPS_FPU_EMU_INC_STATS(loads);
if (get_user(val, va)) {
if (!access_ok(VERIFY_READ, va, sizeof(u32))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__get_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
SITOREG(val, MIPSInst_RT(ir));
break;
}
@ -312,10 +344,16 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
MIPS_FPU_EMU_INC_STATS(stores);
SIFROMREG(val, MIPSInst_RT(ir));
if (put_user(val, va)) {
if (!access_ok(VERIFY_WRITE, va, sizeof(u32))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__put_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
break;
}
@ -440,10 +478,17 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
contpc = (xcp->cp0_epc +
(MIPSInst_SIMM(ir) << 2));
if (get_user(ir,
if (!access_ok(VERIFY_READ, xcp->cp0_epc,
sizeof(mips_instruction))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGBUS;
}
if (__get_user(ir,
(mips_instruction __user *) xcp->cp0_epc)) {
MIPS_FPU_EMU_INC_STATS(errors);
return SIGBUS;
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGSEGV;
}
switch (MIPSInst_OPCODE(ir)) {
@ -506,9 +551,8 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
#if __mips >= 4 && __mips != 32
case cop1x_op:{
int sig;
if ((sig = fpux_emu(xcp, ctx, ir)))
int sig = fpux_emu(xcp, ctx, ir, fault_addr);
if (sig)
return sig;
break;
}
@ -604,7 +648,7 @@ DEF3OP(nmadd, dp, ieee754dp_mul, ieee754dp_add, ieee754dp_neg);
DEF3OP(nmsub, dp, ieee754dp_mul, ieee754dp_sub, ieee754dp_neg);
static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
mips_instruction ir)
mips_instruction ir, void *__user *fault_addr)
{
unsigned rcsr = 0; /* resulting csr */
@ -624,10 +668,16 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
xcp->regs[MIPSInst_FT(ir)]);
MIPS_FPU_EMU_INC_STATS(loads);
if (get_user(val, va)) {
if (!access_ok(VERIFY_READ, va, sizeof(u32))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__get_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
SITOREG(val, MIPSInst_FD(ir));
break;
@ -638,9 +688,15 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
MIPS_FPU_EMU_INC_STATS(stores);
SIFROMREG(val, MIPSInst_FS(ir));
if (!access_ok(VERIFY_WRITE, va, sizeof(u32))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (put_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
return SIGBUS;
*fault_addr = va;
return SIGSEGV;
}
break;
@ -701,10 +757,16 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
xcp->regs[MIPSInst_FT(ir)]);
MIPS_FPU_EMU_INC_STATS(loads);
if (get_user(val, va)) {
if (!access_ok(VERIFY_READ, va, sizeof(u64))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__get_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
DITOREG(val, MIPSInst_FD(ir));
break;
@ -714,10 +776,16 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
MIPS_FPU_EMU_INC_STATS(stores);
DIFROMREG(val, MIPSInst_FS(ir));
if (put_user(val, va)) {
if (!access_ok(VERIFY_WRITE, va, sizeof(u64))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__put_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
break;
case madd_d_op:
@ -1242,7 +1310,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
}
int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
int has_fpu)
int has_fpu, void *__user *fault_addr)
{
unsigned long oldepc, prevepc;
mips_instruction insn;
@ -1252,10 +1320,16 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
do {
prevepc = xcp->cp0_epc;
if (get_user(insn, (mips_instruction __user *) xcp->cp0_epc)) {
if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGBUS;
}
if (__get_user(insn, (mips_instruction __user *) xcp->cp0_epc)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGSEGV;
}
if (insn == 0)
xcp->cp0_epc += 4; /* skip nops */
else {
@ -1267,7 +1341,7 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
*/
/* convert to ieee library modes */
ieee754_csr.rm = ieee_rm[ieee754_csr.rm];
sig = cop1Emulate(xcp, ctx);
sig = cop1Emulate(xcp, ctx, fault_addr);
/* revert to mips rounding mode */
ieee754_csr.rm = mips_rm[ieee754_csr.rm];
}

View File

@ -288,7 +288,7 @@ int mips_dma_supported(struct device *dev, u64 mask)
return plat_dma_supported(dev, mask);
}
void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
@ -298,6 +298,8 @@ void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
__dma_sync((unsigned long)vaddr, size, direction);
}
EXPORT_SYMBOL(dma_cache_sync);
static struct dma_map_ops mips_default_dma_map_ops = {
.alloc_coherent = mips_dma_alloc_coherent,
.free_coherent = mips_dma_free_coherent,

View File

@ -68,6 +68,9 @@ static struct bcache_ops mips_sc_ops = {
*/
static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
{
unsigned int config2 = read_c0_config2();
unsigned int tmp;
/* Check the bypass bit (L2B) */
switch (c->cputype) {
case CPU_34K:
@ -83,6 +86,7 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
c->scache.linesz = 2 << tmp;
else
return 0;
return 1;
}
static inline int __init mips_sc_probe(void)

View File

@ -65,11 +65,15 @@ static unsigned char readb_outer_space(unsigned long long phys)
__asm__ __volatile__ (
" .set mips3 \n"
" .set push \n"
" .set noreorder \n"
" .set nomacro \n"
" ld %0, %1 \n"
" .set pop \n"
" lbu %0, (%0) \n"
" .set mips0 \n"
: "=r" (res)
: "m" (vaddr));
: "R" (vaddr));
write_c0_status(sr);
ssnop_4();
@ -89,11 +93,15 @@ static void writeb_outer_space(unsigned long long phys, unsigned char c)
__asm__ __volatile__ (
" .set mips3 \n"
" .set push \n"
" .set noreorder \n"
" .set nomacro \n"
" ld %0, %1 \n"
" .set pop \n"
" sb %2, (%0) \n"
" .set mips0 \n"
: "=&r" (tmp)
: "m" (vaddr), "r" (c));
: "R" (vaddr), "r" (c));
write_c0_status(sr);
ssnop_4();

View File

@ -82,7 +82,7 @@ int swarm_be_handler(struct pt_regs *regs, int is_fixup)
enum swarm_rtc_type {
RTC_NONE,
RTC_XICOR,
RTC_M4LT81
RTC_M41T81,
};
enum swarm_rtc_type swarm_rtc_type;
@ -96,7 +96,7 @@ void read_persistent_clock(struct timespec *ts)
sec = xicor_get_time();
break;
case RTC_M4LT81:
case RTC_M41T81:
sec = m41t81_get_time();
break;
@ -115,7 +115,7 @@ int rtc_mips_set_time(unsigned long sec)
case RTC_XICOR:
return xicor_set_time(sec);
case RTC_M4LT81:
case RTC_M41T81:
return m41t81_set_time(sec);
case RTC_NONE:
@ -141,7 +141,7 @@ void __init plat_mem_setup(void)
if (xicor_probe())
swarm_rtc_type = RTC_XICOR;
if (m41t81_probe())
swarm_rtc_type = RTC_M4LT81;
swarm_rtc_type = RTC_M41T81;
#ifdef CONFIG_VT
screen_info = (struct screen_info) {

View File

@ -459,7 +459,7 @@ void migrate_irqs(void)
tmp = CROSS_GxICR(irq, new);
x &= GxICR_LEVEL | GxICR_ENABLE;
if (GxICR(irq) & GxICR_REQUEST) {
if (GxICR(irq) & GxICR_REQUEST)
x |= GxICR_REQUEST | GxICR_DETECT;
CROSS_GxICR(irq, new) = x;
tmp = CROSS_GxICR(irq, new);

View File

@ -40,21 +40,17 @@ unsigned long long sched_clock(void)
unsigned long long ll;
unsigned l[2];
} tsc64, result;
unsigned long tsc, tmp;
unsigned long tmp;
unsigned product[3]; /* 96-bit intermediate value */
/* cnt32_to_63() is not safe with preemption */
preempt_disable();
/* read the TSC value
*/
tsc = get_cycles();
/* expand to 64-bits.
/* expand the tsc to 64-bits.
* - sched_clock() must be called once a minute or better or the
* following will go horribly wrong - see cnt32_to_63()
*/
tsc64.ll = cnt32_to_63(tsc) & 0x7fffffffffffffffULL;
tsc64.ll = cnt32_to_63(get_cycles()) & 0x7fffffffffffffffULL;
preempt_enable();

View File

@ -63,6 +63,7 @@
#include <linux/of_gpio.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/watchdog.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>

View File

@ -140,7 +140,7 @@ void __init init_se7206_IRQ(void)
make_se7206_irq(IRQ1_IRQ); /* ATA */
make_se7206_irq(IRQ3_IRQ); /* SLOT / PCM */
__raw_writew(__raw_readw(INTC_ICR1) | 0x000b, INTC_ICR); /* ICR1 */
__raw_writew(__raw_readw(INTC_ICR1) | 0x000b, INTC_ICR1); /* ICR1 */
/* FPGA System register setup*/
__raw_writew(0x0000,INTSTS0); /* Clear INTSTS0 */

View File

@ -34,7 +34,7 @@ static const int pfc_divisors[]={1,2,3,4,6,8,12};
static void master_clk_init(struct clk *clk)
{
return 10000000 * PLL2 * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
clk->rate = 10000000 * PLL2 * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
}
static struct clk_ops sh7201_master_clk_ops = {

View File

@ -81,8 +81,7 @@ static void shoc_clk_init(struct clk *clk)
for (i = 0; i < ARRAY_SIZE(frqcr3_divisors); i++) {
int divisor = frqcr3_divisors[i];
if (clk->ops->set_rate(clk, clk->parent->rate /
divisor, 0) == 0)
if (clk->ops->set_rate(clk, clk->parent->rate / divisor) == 0)
break;
}

View File

@ -25,7 +25,7 @@
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
struct pt_regs;
int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *);
int restore_sigcontext(struct pt_regs *, struct sigcontext __user *);
int setup_sigcontext(struct sigcontext __user *, struct pt_regs *);
void do_signal(struct pt_regs *regs);
#endif

View File

@ -290,12 +290,12 @@ long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
return ret;
}
/* The assembly shim for this function arranges to ignore the return value. */
long compat_sys_rt_sigreturn(struct pt_regs *regs)
{
struct compat_rt_sigframe __user *frame =
(struct compat_rt_sigframe __user *) compat_ptr(regs->sp);
sigset_t set;
long r0;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
@ -308,13 +308,13 @@ long compat_sys_rt_sigreturn(struct pt_regs *regs)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
goto badframe;
if (compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0)
goto badframe;
return r0;
return 0;
badframe:
force_sig(SIGSEGV, current);

View File

@ -1342,8 +1342,8 @@ handle_syscall:
lw r20, r20
/* Jump to syscall handler. */
jalr r20; .Lhandle_syscall_link:
FEEDBACK_REENTER(handle_syscall)
jalr r20
.Lhandle_syscall_link: /* value of "lr" after "jalr r20" above */
/*
* Write our r0 onto the stack so it gets restored instead
@ -1352,6 +1352,9 @@ handle_syscall:
PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
sw r29, r0
.Lsyscall_sigreturn_skip:
FEEDBACK_REENTER(handle_syscall)
/* Do syscall trace again, if requested. */
lw r30, r31
andi r30, r30, _TIF_SYSCALL_TRACE
@ -1536,9 +1539,24 @@ STD_ENTRY_LOCAL(bad_intr)
}; \
STD_ENDPROC(_##x)
/*
* Special-case sigreturn to not write r0 to the stack on return.
* This is technically more efficient, but it also avoids difficulties
* in the 64-bit OS when handling 32-bit compat code, since we must not
* sign-extend r0 for the sigreturn return-value case.
*/
#define PTREGS_SYSCALL_SIGRETURN(x, reg) \
STD_ENTRY(_##x); \
addli lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \
{ \
PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \
j x \
}; \
STD_ENDPROC(_##x)
PTREGS_SYSCALL(sys_execve, r3)
PTREGS_SYSCALL(sys_sigaltstack, r2)
PTREGS_SYSCALL(sys_rt_sigreturn, r0)
PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0)
PTREGS_SYSCALL(sys_cmpxchg_badaddr, r1)
/* Save additional callee-saves to pt_regs, put address in r4 and jump. */

View File

@ -211,6 +211,13 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
childregs->regs[0] = 0; /* return value is zero */
childregs->sp = sp; /* override with new user stack pointer */
/*
* If CLONE_SETTLS is set, set "tp" in the new task to "r4",
* which is passed in as arg #5 to sys_clone().
*/
if (clone_flags & CLONE_SETTLS)
childregs->tp = regs->regs[4];
/*
* Copy the callee-saved registers from the passed pt_regs struct
* into the context-switch callee-saved registers area.
@ -539,6 +546,7 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
return __switch_to(prev, next, next_current_ksp0(next));
}
/* Note there is an implicit fifth argument if (clone_flags & CLONE_SETTLS). */
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
void __user *, parent_tidptr, void __user *, child_tidptr,
struct pt_regs *, regs)

View File

@ -52,7 +52,7 @@ SYSCALL_DEFINE3(sigaltstack, const stack_t __user *, uss,
*/
int restore_sigcontext(struct pt_regs *regs,
struct sigcontext __user *sc, long *pr0)
struct sigcontext __user *sc)
{
int err = 0;
int i;
@ -75,17 +75,15 @@ int restore_sigcontext(struct pt_regs *regs,
regs->faultnum = INT_SWINT_1_SIGRETURN;
err |= __get_user(*pr0, &sc->gregs[0]);
return err;
}
/* sigreturn() returns long since it restores r0 in the interrupted code. */
/* The assembly shim for this function arranges to ignore the return value. */
SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs)
{
struct rt_sigframe __user *frame =
(struct rt_sigframe __user *)(regs->sp);
sigset_t set;
long r0;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
@ -98,13 +96,13 @@ SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
goto badframe;
if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
goto badframe;
return r0;
return 0;
badframe:
force_sig(SIGSEGV, current);

View File

@ -355,7 +355,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
if (heap > 0x3fffffffffffUL)
error("Destination address too large");
#else
if (heap > ((-__PAGE_OFFSET-(512<<20)-1) & 0x7fffffff))
if (heap > ((-__PAGE_OFFSET-(128<<20)-1) & 0x7fffffff))
error("Destination address too large");
#endif
#ifndef CONFIG_RELOCATABLE

View File

@ -72,6 +72,9 @@ struct e820map {
#define BIOS_BEGIN 0x000a0000
#define BIOS_END 0x00100000
#define BIOS_ROM_BASE 0xffe00000
#define BIOS_ROM_END 0xffffffff
#ifdef __KERNEL__
/* see comment in arch/x86/kernel/e820.c */
extern struct e820map e820;

View File

@ -79,7 +79,7 @@
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

View File

@ -45,6 +45,7 @@ obj-y += pci-dma.o quirks.o i8237.o topology.o kdebugfs.o
obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o
obj-y += tsc.o io_delay.o rtc.o
obj-y += pci-iommu_table.o
obj-y += resource.o
obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
obj-y += process.o

View File

@ -1389,6 +1389,14 @@ void __cpuinit end_local_APIC_setup(void)
setup_apic_nmi_watchdog(NULL);
apic_pm_activate();
/*
* Now that local APIC setup is completed for BP, configure the fault
* handling for interrupt remapping.
*/
if (!smp_processor_id() && intr_remapping_enabled)
enable_drhd_fault_handling();
}
#ifdef CONFIG_X86_X2APIC

View File

@ -2430,13 +2430,12 @@ static void ack_apic_level(struct irq_data *data)
{
struct irq_cfg *cfg = data->chip_data;
int i, do_unmask_irq = 0, irq = data->irq;
struct irq_desc *desc = irq_to_desc(irq);
unsigned long v;
irq_complete_move(cfg);
#ifdef CONFIG_GENERIC_PENDING_IRQ
/* If we are moving the irq we need to mask it */
if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
do_unmask_irq = 1;
mask_ioapic(cfg);
}
@ -3413,6 +3412,7 @@ dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
msg.data |= MSI_DATA_VECTOR(cfg->vector);
msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);
dmar_msi_write(irq, &msg);

View File

@ -79,13 +79,6 @@ void __init default_setup_apic_routing(void)
/* need to update phys_pkg_id */
apic->phys_pkg_id = apicid_phys_pkg_id;
}
/*
* Now that apic routing model is selected, configure the
* fault handling for intr remapping.
*/
if (intr_remapping_enabled)
enable_drhd_fault_handling();
}
/* Same for both flat and physical. */

View File

@ -60,16 +60,18 @@
#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
#endif
/* Number of possible pages in the lowmem region */
LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
/* Enough space to fit pagetables for the low memory linear map */
MAPPING_BEYOND_END = \
PAGE_TABLE_SIZE(((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) << PAGE_SHIFT
MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT
/*
* Worst-case size of the kernel mapping we need to make:
* the worst-case size of the kernel itself, plus the extra we need
* to map for the linear map.
* a relocatable kernel can live anywhere in lowmem, so we need to be able
* to map all of lowmem.
*/
KERNEL_PAGES = (KERNEL_IMAGE_SIZE + MAPPING_BEYOND_END)>>PAGE_SHIFT
KERNEL_PAGES = LOWMEM_PAGES
INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
RESERVE_BRK(pagetables, INIT_MAP_SIZE)
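With the default 3G/1G split these constants are easy to evaluate: 1 GiB of lowmem is 262144 pages, which needs 256 page-table pages, i.e. a 1 MiB brk reservation. A hosted check of that arithmetic (the 0xC0000000 __PAGE_OFFSET and the non-PAE 1024-entry tables are assumed defaults, not taken from a real config):

#include <stdio.h>

int main(void)
{
	/* assumed defaults: 3G/1G split, non-PAE, 4 KiB pages */
	unsigned long long page_offset = 0xC0000000ULL;
	unsigned long long lowmem_pages = ((1ULL << 32) - page_offset) >> 12;
	unsigned long long pte_pages = lowmem_pages / 1024; /* PTRS_PER_PGD */

	printf("lowmem pages:    %llu (1 GiB)\n", lowmem_pages); /* 262144 */
	printf("pagetable pages: %llu\n", pte_pages);            /* 256 */
	printf("brk reserve:     %llu KiB\n", pte_pages * 4);    /* 1024 KiB */
	return 0;
}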
@ -620,13 +622,13 @@ ENTRY(initial_code)
__PAGE_ALIGNED_BSS
.align PAGE_SIZE_asm
#ifdef CONFIG_X86_PAE
initial_pg_pmd:
ENTRY(initial_pg_pmd)
.fill 1024*KPMDS,4,0
#else
ENTRY(initial_page_table)
.fill 1024,4,0
#endif
initial_pg_fixmap:
ENTRY(initial_pg_fixmap)
.fill 1024,4,0
ENTRY(empty_zero_page)
.fill 4096,1,0

View File

@ -27,6 +27,9 @@
#define HPET_DEV_FSB_CAP 0x1000
#define HPET_DEV_PERI_CAP 0x2000
#define HPET_MIN_CYCLES 128
#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
#define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt)
/*
@ -299,8 +302,9 @@ static void hpet_legacy_clockevent_register(void)
/* Calculate the min / max delta */
hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
&hpet_clockevent);
/* 5 usec minimum reprogramming delta. */
hpet_clockevent.min_delta_ns = 5000;
/* Setup minimum reprogramming delta. */
hpet_clockevent.min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA,
&hpet_clockevent);
/*
* Start hpet with the boot cpu mask and make it
@ -393,22 +397,24 @@ static int hpet_next_event(unsigned long delta,
* the wraparound into account) nor a simple count down event
* mode. Further the write to the comparator register is
* delayed internally up to two HPET clock cycles in certain
* chipsets (ATI, ICH9,10). We worked around that by reading
* back the compare register, but that required another
* workaround for ICH9,10 chips where the first readout after
* write can return the old stale value. We already have a
* minimum delta of 5us enforced, but a NMI or SMI hitting
* chipsets (ATI, ICH9,10). Some newer AMD chipsets have even
* longer delays. We worked around that by reading back the
* compare register, but that required another workaround for
* ICH9,10 chips where the first readout after write can
* return the old stale value. We already had a minimum
* programming delta of 5us enforced, but a NMI or SMI hitting
* between the counter readout and the comparator write can
* move us behind that point easily. Now instead of reading
* the compare register back several times, we make the ETIME
* decision based on the following: Return ETIME if the
* counter value after the write is less than 8 HPET cycles
* counter value after the write is less than HPET_MIN_CYCLES
* away from the event or if the counter is already ahead of
* the event.
* the event. The minimum programming delta for the generic
* clockevents code is set to 1.5 * HPET_MIN_CYCLES.
*/
res = (s32)(cnt - hpet_readl(HPET_COUNTER));
return res < 8 ? -ETIME : 0;
return res < HPET_MIN_CYCLES ? -ETIME : 0;
}
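With HPET_MIN_CYCLES = 128, the minimum programming delta handed to the generic clockevents code comes to 192 HPET cycles. A hosted check of the numbers (the 14.318 MHz clock rate is an assumed, though common, HPET frequency):

#include <stdio.h>

#define HPET_MIN_CYCLES		128
#define HPET_MIN_PROG_DELTA	(HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))

int main(void)
{
	/* 192 cycles at an assumed 14.318 MHz clock is ~13.4 us,
	 * replacing the old fixed 5 us floor. */
	printf("min prog delta: %d cycles\n", HPET_MIN_PROG_DELTA); /* 192 */
	printf("~%.1f us at 14.318 MHz\n", HPET_MIN_PROG_DELTA / 14.318);
	return 0;
}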
static void hpet_legacy_set_mode(enum clock_event_mode mode,

View File

@ -364,8 +364,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
/* For performance reasons, reuse mc area when possible */
if (!mc || mc_size > curr_mc_size) {
if (mc)
vfree(mc);
vfree(mc);
mc = vmalloc(mc_size);
if (!mc)
break;
@ -374,13 +373,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
if (get_ucode_data(mc, ucode_ptr, mc_size) ||
microcode_sanity_check(mc) < 0) {
vfree(mc);
break;
}
if (get_matching_microcode(&uci->cpu_sig, mc, new_rev)) {
if (new_mc)
vfree(new_mc);
vfree(new_mc);
new_rev = mc_header.rev;
new_mc = mc;
mc = NULL; /* trigger new vmalloc */
@ -390,12 +387,10 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
leftover -= mc_size;
}
if (mc)
vfree(mc);
vfree(mc);
if (leftover) {
if (new_mc)
vfree(new_mc);
vfree(new_mc);
state = UCODE_ERROR;
goto out;
}
@ -405,8 +400,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
goto out;
}
if (uci->mc)
vfree(uci->mc);
vfree(uci->mc);
uci->mc = (struct microcode_intel *)new_mc;
pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",

View File

@ -0,0 +1,48 @@
#include <linux/ioport.h>
#include <asm/e820.h>
static void resource_clip(struct resource *res, resource_size_t start,
resource_size_t end)
{
resource_size_t low = 0, high = 0;
if (res->end < start || res->start > end)
return; /* no conflict */
if (res->start < start)
low = start - res->start;
if (res->end > end)
high = res->end - end;
/* Keep the area above or below the conflict, whichever is larger */
if (low > high)
res->end = start - 1;
else
res->start = end + 1;
}
static void remove_e820_regions(struct resource *avail)
{
int i;
struct e820entry *entry;
for (i = 0; i < e820.nr_map; i++) {
entry = &e820.map[i];
resource_clip(avail, entry->addr,
entry->addr + entry->size - 1);
}
}
void arch_remove_reservations(struct resource *avail)
{
/* Trim out BIOS areas (low 1MB and high 2MB) and E820 regions */
if (avail->flags & IORESOURCE_MEM) {
if (avail->start < BIOS_END)
avail->start = BIOS_END;
resource_clip(avail, BIOS_ROM_BASE, BIOS_ROM_END);
remove_e820_regions(avail);
}
}
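resource_clip() trims an available window against one conflicting range and keeps whichever side of the conflict is larger. A self-contained sketch of the same logic (the simplified struct resource and the main() are stand-ins for illustration only):

#include <stdio.h>

/* simplified stand-ins for the kernel types */
typedef unsigned long long resource_size_t;
struct resource { resource_size_t start, end; };

static void clip(struct resource *res, resource_size_t start,
		 resource_size_t end)
{
	resource_size_t low = 0, high = 0;

	if (res->end < start || res->start > end)
		return;			/* no conflict */
	if (res->start < start)
		low = start - res->start;
	if (res->end > end)
		high = res->end - end;
	/* keep whichever side of the conflict is larger */
	if (low > high)
		res->end = start - 1;
	else
		res->start = end + 1;
}

int main(void)
{
	/* window 0..0xfffff overlapping a reserved 0xe0000..0xfffff */
	struct resource avail = { 0x00000, 0xfffff };

	clip(&avail, 0xe0000, 0xfffff);
	printf("%#llx-%#llx\n", avail.start, avail.end); /* 0-0xdffff */
	return 0;
}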

View File

@ -501,7 +501,18 @@ static inline unsigned long long get_total_mem(void)
return total << PAGE_SHIFT;
}
#define DEFAULT_BZIMAGE_ADDR_MAX 0x37FFFFFF
/*
* Keep the crash kernel below this limit. On 32 bits earlier kernels
* would limit the kernel to the low 512 MiB due to mapping restrictions.
* On 64 bits, kexec-tools currently limits us to 896 MiB; increase this
* limit once kexec-tools are fixed.
*/
#ifdef CONFIG_X86_32
# define CRASH_KERNEL_ADDR_MAX (512 << 20)
#else
# define CRASH_KERNEL_ADDR_MAX (896 << 20)
#endif
static void __init reserve_crashkernel(void)
{
unsigned long long total_mem;
@ -520,10 +531,10 @@ static void __init reserve_crashkernel(void)
const unsigned long long alignment = 16<<20; /* 16M */
/*
* kexec want bzImage is below DEFAULT_BZIMAGE_ADDR_MAX
* kexec want bzImage is below CRASH_KERNEL_ADDR_MAX
*/
crash_base = memblock_find_in_range(alignment,
DEFAULT_BZIMAGE_ADDR_MAX, crash_size, alignment);
CRASH_KERNEL_ADDR_MAX, crash_size, alignment);
if (crash_base == MEMBLOCK_ERROR) {
pr_info("crashkernel reservation failed - No suitable area found.\n");
@ -769,7 +780,6 @@ void __init setup_arch(char **cmdline_p)
x86_init.oem.arch_setup();
resource_alloc_from_bottom = 0;
iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
setup_memory_map();
parse_setup_data();

View File

@ -394,7 +394,8 @@ static void __init setup_xstate_init(void)
* Setup init_xstate_buf to represent the init state of
* all the features managed by the xsave
*/
init_xstate_buf = alloc_bootmem(xstate_size);
init_xstate_buf = alloc_bootmem_align(xstate_size,
__alignof__(struct xsave_struct));
init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT;
clts();

View File

@ -575,6 +575,8 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
s->pics[1].elcr_mask = 0xde;
s->pics[0].pics_state = s;
s->pics[1].pics_state = s;
s->pics[0].isr_ack = 0xff;
s->pics[1].isr_ack = 0xff;
/*
* Initialize PIO device

View File

@ -2394,7 +2394,8 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
ASSERT(!VALID_PAGE(root));
spin_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_free_some_pages(vcpu);
sp = kvm_mmu_get_page(vcpu, i << 30, i << 30,
sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
i << 30,
PT32_ROOT_LEVEL, 1, ACC_ALL,
NULL);
root = __pa(sp->spt);

View File

@ -3494,6 +3494,10 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
{
switch (func) {
case 0x00000001:
/* Mask out xsave bit as long as it is not supported by SVM */
entry->ecx &= ~(bit(X86_FEATURE_XSAVE));
break;
case 0x80000001:
if (nested)
entry->ecx |= (1 << 2); /* Set SVM bit */

View File

@ -4227,11 +4227,6 @@ static int vmx_get_lpage_level(void)
return PT_PDPE_LEVEL;
}
static inline u32 bit(int bitno)
{
return 1 << (bitno & 31);
}
static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;

View File

@ -155,11 +155,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
u64 __read_mostly host_xcr0;
static inline u32 bit(int bitno)
{
return 1 << (bitno & 31);
}
static void kvm_on_user_return(struct user_return_notifier *urn)
{
unsigned slot;
@ -4569,9 +4564,11 @@ static void kvm_timer_init(void)
#ifdef CONFIG_CPU_FREQ
struct cpufreq_policy policy;
memset(&policy, 0, sizeof(policy));
cpufreq_get_policy(&policy, get_cpu());
cpu = get_cpu();
cpufreq_get_policy(&policy, cpu);
if (policy.cpuinfo.max_freq)
max_tsc_khz = policy.cpuinfo.max_freq;
put_cpu();
#endif
cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
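The fix keeps the task on one CPU across the policy lookup: get_cpu() bumps the preempt count and returns the CPU id, and the newly added put_cpu() drops it again, which the old one-liner never did. A toy model of that balanced pattern (the counter and helpers are hosted stand-ins, not the kernel primitives):

#include <stdio.h>

static int preempt_count;

/* hosted stand-ins for the kernel helpers, for illustration only */
static int get_cpu(void)  { preempt_count++; return 0; }
static void put_cpu(void) { preempt_count--; }

int main(void)
{
	int cpu = get_cpu();	/* take the id with preemption held off... */

	printf("query policy of cpu %d\n", cpu);
	put_cpu();		/* ...and drop the preemption reference */
	printf("preempt_count back to %d\n", preempt_count); /* 0 */
	return 0;
}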
@ -5522,6 +5519,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
if (sregs->cr4 & X86_CR4_OSXSAVE)
update_cpuid(vcpu);
if (!is_long_mode(vcpu) && is_pae(vcpu)) {
load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
mmu_reset_needed = 1;

View File

@ -70,6 +70,11 @@ static inline int is_paging(struct kvm_vcpu *vcpu)
return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
}
static inline u32 bit(int bitno)
{
return 1 << (bitno & 31);
}
void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq);

View File

@ -531,7 +531,10 @@ static void lguest_write_cr3(unsigned long cr3)
{
lguest_data.pgdir = cr3;
lazy_hcall1(LHCALL_NEW_PGTABLE, cr3);
cr3_changed = true;
/* These two page tables are simple, linear, and used during boot */
if (cr3 != __pa(swapper_pg_dir) && cr3 != __pa(initial_page_table))
cr3_changed = true;
}
static unsigned long lguest_read_cr3(void)
@ -703,9 +706,9 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
* to forget all of them. Fortunately, this is very rare.
*
* ... except in early boot when the kernel sets up the initial pagetables,
* which makes booting astonishingly slow: 1.83 seconds! So we don't even tell
* the Host anything changed until we've done the first page table switch,
* which brings boot back to 0.25 seconds.
* which makes booting astonishingly slow: 48 seconds! So we don't even tell
* the Host anything changed until we've done the first real page table switch,
* which brings boot back to 4.3 seconds.
*/
static void lguest_set_pte(pte_t *ptep, pte_t pteval)
{
@ -1002,7 +1005,7 @@ static void lguest_time_init(void)
clockevents_register_device(&lguest_clockevent);
/* Finally, we unblock the timer interrupt. */
enable_lguest_irq(0);
clear_bit(0, lguest_data.blocked_interrupts);
}
/*
@ -1349,9 +1352,6 @@ __init void lguest_init(void)
*/
switch_to_new_gdt(0);
/* We actually boot with all memory mapped, but let's say 128MB. */
max_pfn_mapped = (128*1024*1024) >> PAGE_SHIFT;
/*
* The Host<->Guest Switcher lives at the top of our address space, and
* the Host told us how big it is when we made LGUEST_INIT hypercall:

View File

@ -4,6 +4,7 @@
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/processor-flags.h>
#include <asm/pgtable.h>
/*G:020
* Our story starts with the kernel booting into startup_32 in
@ -37,9 +38,113 @@ ENTRY(lguest_entry)
/* Set up the initial stack so we can run C code. */
movl $(init_thread_union+THREAD_SIZE),%esp
call init_pagetables
/* Jumps are relative: we're running __PAGE_OFFSET too low. */
jmp lguest_init+__PAGE_OFFSET
/*
* Initialize page tables. This creates a PDE and a set of page
* tables, which are located immediately beyond __brk_base. The variable
* _brk_end is set up to point to the first "safe" location.
* Mappings are created both at virtual address 0 (identity mapping)
* and PAGE_OFFSET for up to _end.
*
* FIXME: This code is taken verbatim from arch/x86/kernel/head_32.S: they
* don't have a stack at this point, so we can't just use call and ret.
*/
init_pagetables:
#if PTRS_PER_PMD > 1
#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
#else
#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
#endif
#define pa(X) ((X) - __PAGE_OFFSET)
/* Enough space to fit pagetables for the low memory linear map */
MAPPING_BEYOND_END = \
PAGE_TABLE_SIZE(((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) << PAGE_SHIFT
#ifdef CONFIG_X86_PAE
/*
* In PAE mode initial_page_table is statically defined to contain
* enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3
* entries). The identity mapping is handled by pointing two PGD entries
* to the first kernel PMD.
*
* Note the upper half of each PMD or PTE are always zero at this stage.
*/
#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */
xorl %ebx,%ebx /* %ebx is kept at zero */
movl $pa(__brk_base), %edi
movl $pa(initial_pg_pmd), %edx
movl $PTE_IDENT_ATTR, %eax
10:
leal PDE_IDENT_ATTR(%edi),%ecx /* Create PMD entry */
movl %ecx,(%edx) /* Store PMD entry */
/* Upper half already zero */
addl $8,%edx
movl $512,%ecx
11:
stosl
xchgl %eax,%ebx
stosl
xchgl %eax,%ebx
addl $0x1000,%eax
loop 11b
/*
* End condition: we must map up to the end + MAPPING_BEYOND_END.
*/
movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
cmpl %ebp,%eax
jb 10b
1:
addl $__PAGE_OFFSET, %edi
movl %edi, pa(_brk_end)
shrl $12, %eax
movl %eax, pa(max_pfn_mapped)
/* Do early initialization of the fixmap area */
movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
#else /* Not PAE */
page_pde_offset = (__PAGE_OFFSET >> 20);
movl $pa(__brk_base), %edi
movl $pa(initial_page_table), %edx
movl $PTE_IDENT_ATTR, %eax
10:
leal PDE_IDENT_ATTR(%edi),%ecx /* Create PDE entry */
movl %ecx,(%edx) /* Store identity PDE entry */
movl %ecx,page_pde_offset(%edx) /* Store kernel PDE entry */
addl $4,%edx
movl $1024, %ecx
11:
stosl
addl $0x1000,%eax
loop 11b
/*
* End condition: we must map up to the end + MAPPING_BEYOND_END.
*/
movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
cmpl %ebp,%eax
jb 10b
addl $__PAGE_OFFSET, %edi
movl %edi, pa(_brk_end)
shrl $12, %eax
movl %eax, pa(max_pfn_mapped)
/* Do early initialization of the fixmap area */
movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
movl %eax,pa(initial_page_table+0xffc)
#endif
ret
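Each pass of the non-PAE loop above takes one page table from the brk area and wires it into the page directory twice, once for the identity mapping and once at __PAGE_OFFSET. A hosted C model of that double wiring (slot math only; the 0x100000 stand-in for __brk_base and the 0x3 present/writable PDE flags are illustrative assumptions):

#include <stdio.h>
#include <stdint.h>

#define PAGE_OFFSET	0xC0000000u
#define PDE_SLOT(va)	((va) >> 22)	/* 4 MiB per PDE, non-PAE */

int main(void)
{
	uint32_t pgd[1024] = { 0 };
	uint32_t pt_phys = 0x100000;	/* assumed __brk_base */
	uint32_t va;

	/* map the first 32 MiB both at 0 and at PAGE_OFFSET */
	for (va = 0; va < 8u << 22; va += 1u << 22) {
		pgd[PDE_SLOT(va)] = pt_phys | 0x3;		 /* identity */
		pgd[PDE_SLOT(va + PAGE_OFFSET)] = pt_phys | 0x3; /* kernel */
		pt_phys += 4096;
	}
	printf("pgd[0]=%#x pgd[%u]=%#x\n", pgd[0],
	       PDE_SLOT(PAGE_OFFSET), pgd[PDE_SLOT(PAGE_OFFSET)]);
	return 0;
}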
/*G:055
* We create a macro which puts the assembler code between lgstart_ and lgend_
* markers. These templates are put in the .text section: they can't be

View File

@ -630,21 +630,29 @@ static int __init_ibs_nmi(void)
return 0;
}
/* initialize the APIC for the IBS interrupts if available */
/*
* check and reserve APIC extended interrupt LVT offset for IBS if
* available
*
* init_ibs() performs implicitly cpu-local operations, so pin this
* thread to its current CPU
*/
static void init_ibs(void)
{
preempt_disable();
ibs_caps = get_ibs_caps();
if (!ibs_caps)
return;
goto out;
if (__init_ibs_nmi()) {
if (__init_ibs_nmi() < 0)
ibs_caps = 0;
return;
}
else
printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n",
(unsigned)ibs_caps);
out:
preempt_enable();
}
static int (*create_arch_files)(struct super_block *sb, struct dentry *root);

View File

@ -65,21 +65,13 @@ pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
struct pci_dev *dev = data;
resource_size_t start = round_down(res->end - size + 1, align);
resource_size_t start = res->start;
if (res->flags & IORESOURCE_IO) {
/*
* If we're avoiding ISA aliases, the largest contiguous I/O
* port space is 256 bytes. Clearing bits 9 and 10 preserves
* all 256-byte and smaller alignments, so the result will
* still be correctly aligned.
*/
if (!skip_isa_ioresource_align(dev))
start &= ~0x300;
} else if (res->flags & IORESOURCE_MEM) {
if (start < BIOS_END)
start = res->end; /* fail; no space */
if (skip_isa_ioresource_align(dev))
return start;
if (start & 0x300)
start = (start + 0x3ff) & ~0x3ff;
}
return start;
}
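For I/O resources the rewritten helper now rounds up past the ISA alias bits (0x300) to the next 1 KiB boundary, instead of rounding the candidate address down as the old code did. A hosted check of that step in isolation (the sample values are arbitrary):

#include <stdio.h>

/* the ISA-alias avoidance step from above, in isolation */
static unsigned int align_io(unsigned int start)
{
	if (start & 0x300)
		start = (start + 0x3ff) & ~0x3ffu;
	return start;
}

int main(void)
{
	printf("%#x\n", align_io(0x1100)); /* 0x1400: bumped past alias bits */
	printf("%#x\n", align_io(0x1400)); /* 0x1400: already alias-free */
	return 0;
}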
