Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus

* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus:
  [MIPS] Kconfig: Move misplaced NR_CPUS default from SMTC to VSMP.
  [MIPS] Lockdep: Fix recursion bug.
  [MIPS] RTLX: Handle copy_*_user return values.
  [MIPS] RTLX: Protect rtlx_{read,write} with mutex.
  [MIPS] RTLX: Harden against compiler reordering and optimization.
  [MIPS] RTLX: Don't use volatile; it's fragile.
  [MIPS] Lasat: Downgrade 64-bit kernel from experimental to broken.
  [MIPS] Compat: Fix build if CONFIG_SYSVIPC is disabled.
  [CHAR] lcd: Fix two warnings.
  [MIPS] FPU ownership management & preemption fixes
  [MIPS] Check FCSR for pending interrupts, alternative version
  [MIPS] IP27, IP35: Fix warnings.
Linus Torvalds 2007-03-16 19:28:15 -07:00
commit 4745591167
24 changed files with 339 additions and 227 deletions

View File

@ -250,7 +250,7 @@ config LASAT
select R5000_CPU_SCACHE
select SYS_HAS_CPU_R5000
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL
select SYS_SUPPORTS_64BIT_KERNEL if BROKEN
select SYS_SUPPORTS_LITTLE_ENDIAN
select GENERIC_HARDIRQS_NO__DO_IRQ
@ -1559,6 +1559,7 @@ config MIPS_MT_SMP
select CPU_MIPSR2_IRQ_VI
select CPU_MIPSR2_SRS
select MIPS_MT
select NR_CPUS_DEFAULT_2
select SMP
select SYS_SUPPORTS_SMP
help
@ -1573,7 +1574,6 @@ config MIPS_MT_SMTC
select CPU_MIPSR2_IRQ_VI
select CPU_MIPSR2_SRS
select MIPS_MT
select NR_CPUS_DEFAULT_2
select NR_CPUS_DEFAULT_8
select SMP
select SYS_SUPPORTS_SMP

View File

@ -191,6 +191,8 @@ void sp_work_handle_request(void)
struct mtsp_syscall_generic generic;
struct mtsp_syscall_ret ret;
struct kspd_notifications *n;
unsigned long written;
mm_segment_t old_fs;
struct timeval tv;
struct timezone tz;
int cmd;
@ -201,7 +203,11 @@ void sp_work_handle_request(void)
ret.retval = -1;
if (!rtlx_read(RTLX_CHANNEL_SYSIO, &sc, sizeof(struct mtsp_syscall), 0)) {
old_fs = get_fs();
set_fs(KERNEL_DS);
if (!rtlx_read(RTLX_CHANNEL_SYSIO, &sc, sizeof(struct mtsp_syscall))) {
set_fs(old_fs);
printk(KERN_ERR "Expected request but nothing to read\n");
return;
}
@ -209,7 +215,8 @@ void sp_work_handle_request(void)
size = sc.size;
if (size) {
if (!rtlx_read(RTLX_CHANNEL_SYSIO, &generic, size, 0)) {
if (!rtlx_read(RTLX_CHANNEL_SYSIO, &generic, size)) {
set_fs(old_fs);
printk(KERN_ERR "Expected request but nothing to read\n");
return;
}
@ -282,8 +289,11 @@ void sp_work_handle_request(void)
if (vpe_getuid(SP_VPE))
sp_setfsuidgid( 0, 0);
if ((rtlx_write(RTLX_CHANNEL_SYSIO, &ret, sizeof(struct mtsp_syscall_ret), 0))
< sizeof(struct mtsp_syscall_ret))
old_fs = get_fs();
set_fs(KERNEL_DS);
written = rtlx_write(RTLX_CHANNEL_SYSIO, &ret, sizeof(ret));
set_fs(old_fs);
if (written < sizeof(ret))
printk("KSPD: sp_work_handle_request failed to send to SP\n");
}
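
Illustrative aside, not part of the commit: the reworked kspd path above now treats anything short of sizeof(ret) coming back from rtlx_write() as a failure and logs it. A minimal user-space analogue of that short-write check, using an ordinary pipe in place of the RTLX channel (the struct and names are invented for the sketch):

#include <stdio.h>
#include <unistd.h>

struct reply { int retval; int err; };    /* stands in for struct mtsp_syscall_ret */

int main(void)
{
    int fds[2];
    struct reply ret = { .retval = -1, .err = 0 };
    ssize_t written;

    if (pipe(fds))
        return 1;

    /* same idea as kspd: either the whole record went out, or we complain */
    written = write(fds[1], &ret, sizeof(ret));
    if (written < (ssize_t)sizeof(ret))
        fprintf(stderr, "failed to send reply\n");

    close(fds[0]);
    close(fds[1]);
    return 0;
}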

View File

@ -311,6 +311,8 @@ asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid,
return ret;
}
#ifdef CONFIG_SYSVIPC
asmlinkage long
sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
{
@ -368,6 +370,16 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
return err;
}
#else
asmlinkage long
sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
{
return -ENOSYS;
}
#endif /* CONFIG_SYSVIPC */
#ifdef CONFIG_MIPS32_N32
asmlinkage long sysn32_semctl(int semid, int semnum, int cmd, u32 arg)
{

View File

@ -49,8 +49,7 @@ LEAF(resume)
#ifndef CONFIG_CPU_HAS_LLSC
sw zero, ll_bit
#endif
mfc0 t1, CP0_STATUS
sw t1, THREAD_STATUS(a0)
mfc0 t2, CP0_STATUS
cpu_save_nonscratch a0
sw ra, THREAD_REG31(a0)
@ -60,8 +59,8 @@ LEAF(resume)
lw t3, TASK_THREAD_INFO(a0)
lw t0, TI_FLAGS(t3)
li t1, _TIF_USEDFPU
and t2, t0, t1
beqz t2, 1f
and t1, t0
beqz t1, 1f
nor t1, zero, t1
and t0, t0, t1
@ -74,10 +73,13 @@ LEAF(resume)
li t1, ~ST0_CU1
and t0, t0, t1
sw t0, ST_OFF(t3)
/* clear thread_struct CU1 bit */
and t2, t1
fpu_save_single a0, t0 # clobbers t0
1:
sw t2, THREAD_STATUS(a0)
/*
* The order of restoring the registers takes care of the race
* updating $28, $29 and kernelsp without disabling ints.

View File

@ -114,14 +114,6 @@ LEAF(_save_fp_context32)
*/
LEAF(_restore_fp_context)
EX lw t0, SC_FPC_CSR(a0)
/* Fail if the CSR has exceptions pending */
srl t1, t0, 5
and t1, t0
andi t1, 0x1f << 7
bnez t1, fault
nop
#ifdef CONFIG_64BIT
EX ldc1 $f1, SC_FPREGS+8(a0)
EX ldc1 $f3, SC_FPREGS+24(a0)
@ -165,14 +157,6 @@ LEAF(_restore_fp_context)
LEAF(_restore_fp_context32)
/* Restore an o32 sigcontext. */
EX lw t0, SC32_FPC_CSR(a0)
/* Fail if the CSR has exceptions pending */
srl t1, t0, 5
and t1, t0
andi t1, 0x1f << 7
bnez t1, fault
nop
EX ldc1 $f0, SC32_FPREGS+0(a0)
EX ldc1 $f2, SC32_FPREGS+16(a0)
EX ldc1 $f4, SC32_FPREGS+32(a0)

View File

@ -48,8 +48,7 @@
#ifndef CONFIG_CPU_HAS_LLSC
sw zero, ll_bit
#endif
mfc0 t1, CP0_STATUS
LONG_S t1, THREAD_STATUS(a0)
mfc0 t2, CP0_STATUS
cpu_save_nonscratch a0
LONG_S ra, THREAD_REG31(a0)
@ -59,8 +58,8 @@
PTR_L t3, TASK_THREAD_INFO(a0)
LONG_L t0, TI_FLAGS(t3)
li t1, _TIF_USEDFPU
and t2, t0, t1
beqz t2, 1f
and t1, t0
beqz t1, 1f
nor t1, zero, t1
and t0, t0, t1
@ -73,10 +72,13 @@
li t1, ~ST0_CU1
and t0, t0, t1
LONG_S t0, ST_OFF(t3)
/* clear thread_struct CU1 bit */
and t2, t1
fpu_save_double a0 t0 t1 # c0_status passed in t0
# clobbers t1
1:
LONG_S t2, THREAD_STATUS(a0)
/*
* The order of restoring the registers takes care of the race

View File

@ -54,6 +54,7 @@ static struct chan_waitqueues {
wait_queue_head_t rt_queue;
wait_queue_head_t lx_queue;
atomic_t in_open;
struct mutex mutex;
} channel_wqs[RTLX_CHANNELS];
static struct irqaction irq;
@ -146,7 +147,7 @@ static void stopping(int vpe)
int rtlx_open(int index, int can_sleep)
{
volatile struct rtlx_info **p;
struct rtlx_info **p;
struct rtlx_channel *chan;
enum rtlx_state state;
int ret = 0;
@ -179,13 +180,24 @@ int rtlx_open(int index, int can_sleep)
}
}
smp_rmb();
if (*p == NULL) {
if (can_sleep) {
__wait_event_interruptible(channel_wqs[index].lx_queue,
*p != NULL,
ret);
if (ret)
DEFINE_WAIT(wait);
for (;;) {
prepare_to_wait(&channel_wqs[index].lx_queue, &wait, TASK_INTERRUPTIBLE);
smp_rmb();
if (*p != NULL)
break;
if (!signal_pending(current)) {
schedule();
continue;
}
ret = -ERESTARTSYS;
goto out_fail;
}
finish_wait(&channel_wqs[index].lx_queue, &wait);
} else {
printk(" *vpe_get_shared is NULL. "
"Has an SP program been loaded?\n");
@ -277,56 +289,52 @@ unsigned int rtlx_write_poll(int index)
return write_spacefree(chan->rt_read, chan->rt_write, chan->buffer_size);
}
static inline void copy_to(void *dst, void *src, size_t count, int user)
ssize_t rtlx_read(int index, void __user *buff, size_t count)
{
if (user)
copy_to_user(dst, src, count);
else
memcpy(dst, src, count);
}
static inline void copy_from(void *dst, void *src, size_t count, int user)
{
if (user)
copy_from_user(dst, src, count);
else
memcpy(dst, src, count);
}
ssize_t rtlx_read(int index, void *buff, size_t count, int user)
{
size_t fl = 0L;
size_t lx_write, fl = 0L;
struct rtlx_channel *lx;
unsigned long failed;
if (rtlx == NULL)
return -ENOSYS;
lx = &rtlx->channel[index];
mutex_lock(&channel_wqs[index].mutex);
smp_rmb();
lx_write = lx->lx_write;
/* find out how much in total */
count = min(count,
(size_t)(lx->lx_write + lx->buffer_size - lx->lx_read)
(size_t)(lx_write + lx->buffer_size - lx->lx_read)
% lx->buffer_size);
/* then how much from the read pointer onwards */
fl = min( count, (size_t)lx->buffer_size - lx->lx_read);
fl = min(count, (size_t)lx->buffer_size - lx->lx_read);
copy_to(buff, &lx->lx_buffer[lx->lx_read], fl, user);
failed = copy_to_user(buff, lx->lx_buffer + lx->lx_read, fl);
if (failed)
goto out;
/* and if there is anything left at the beginning of the buffer */
if ( count - fl )
copy_to (buff + fl, lx->lx_buffer, count - fl, user);
if (count - fl)
failed = copy_to_user(buff + fl, lx->lx_buffer, count - fl);
/* update the index */
lx->lx_read += count;
lx->lx_read %= lx->buffer_size;
out:
count -= failed;
smp_wmb();
lx->lx_read = (lx->lx_read + count) % lx->buffer_size;
smp_wmb();
mutex_unlock(&channel_wqs[index].mutex);
return count;
}
ssize_t rtlx_write(int index, void *buffer, size_t count, int user)
ssize_t rtlx_write(int index, const void __user *buffer, size_t count)
{
struct rtlx_channel *rt;
unsigned long failed;
size_t rt_read;
size_t fl;
if (rtlx == NULL)
@ -334,24 +342,35 @@ ssize_t rtlx_write(int index, void *buffer, size_t count, int user)
rt = &rtlx->channel[index];
mutex_lock(&channel_wqs[index].mutex);
smp_rmb();
rt_read = rt->rt_read;
/* total number of bytes to copy */
count = min(count,
(size_t)write_spacefree(rt->rt_read, rt->rt_write,
rt->buffer_size));
(size_t)write_spacefree(rt_read, rt->rt_write, rt->buffer_size));
/* first bit from write pointer to the end of the buffer, or count */
fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
copy_from (&rt->rt_buffer[rt->rt_write], buffer, fl, user);
failed = copy_from_user(rt->rt_buffer + rt->rt_write, buffer, fl);
if (failed)
goto out;
/* if there's any left copy to the beginning of the buffer */
if( count - fl )
copy_from (rt->rt_buffer, buffer + fl, count - fl, user);
if (count - fl) {
failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
}
rt->rt_write += count;
rt->rt_write %= rt->buffer_size;
out:
count -= failed;
return(count);
smp_wmb();
rt->rt_write = (rt->rt_write + count) % rt->buffer_size;
smp_wmb();
mutex_unlock(&channel_wqs[index].mutex);
return count;
}
@ -403,7 +422,7 @@ static ssize_t file_read(struct file *file, char __user * buffer, size_t count,
return 0; // -EAGAIN makes cat whinge
}
return rtlx_read(minor, buffer, count, 1);
return rtlx_read(minor, buffer, count);
}
static ssize_t file_write(struct file *file, const char __user * buffer,
@ -429,7 +448,7 @@ static ssize_t file_write(struct file *file, const char __user * buffer,
return ret;
}
return rtlx_write(minor, (void *)buffer, count, 1);
return rtlx_write(minor, buffer, count);
}
static const struct file_operations rtlx_fops = {
@ -468,6 +487,7 @@ static int rtlx_module_init(void)
init_waitqueue_head(&channel_wqs[i].rt_queue);
init_waitqueue_head(&channel_wqs[i].lx_queue);
atomic_set(&channel_wqs[i].in_open, 0);
mutex_init(&channel_wqs[i].mutex);
dev = device_create(mt_class, NULL, MKDEV(major, i),
"%s%d", module_name, i);

View File

@ -31,4 +31,7 @@ extern void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
*/
extern int install_sigtramp(unsigned int __user *tramp, unsigned int syscall);
/* Check and clear pending FPU exceptions in saved CSR */
extern int fpcsr_pending(unsigned int __user *fpcsr);
#endif /* __SIGNAL_COMMON_H */

View File

@ -82,6 +82,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
int err = 0;
int i;
unsigned int used_math;
err |= __put_user(regs->cp0_epc, &sc->sc_pc);
@ -104,26 +105,53 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
}
err |= __put_user(!!used_math(), &sc->sc_used_math);
used_math = !!used_math();
err |= __put_user(used_math, &sc->sc_used_math);
if (used_math()) {
if (used_math) {
/*
* Save FPU state to signal context. Signal handler
* will "inherit" current FPU state.
*/
preempt_disable();
if (!is_fpu_owner()) {
own_fpu();
restore_fp(current);
}
own_fpu(1);
enable_fp_in_kernel();
err |= save_fp_context(sc);
preempt_enable();
disable_fp_in_kernel();
}
return err;
}
int fpcsr_pending(unsigned int __user *fpcsr)
{
int err, sig = 0;
unsigned int csr, enabled;
err = __get_user(csr, fpcsr);
enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
/*
* If the signal handler set some FPU exceptions, clear it and
* send SIGFPE.
*/
if (csr & enabled) {
csr &= ~enabled;
err |= __put_user(csr, fpcsr);
sig = SIGFPE;
}
return err ?: sig;
}
static int
check_and_restore_fp_context(struct sigcontext __user *sc)
{
int err, sig;
err = sig = fpcsr_pending(&sc->sc_fpc_csr);
if (err > 0)
err = 0;
err |= restore_fp_context(sc);
return err ?: sig;
}
int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
unsigned int used_math;
@ -157,19 +185,18 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
err |= __get_user(used_math, &sc->sc_used_math);
conditional_used_math(used_math);
preempt_disable();
if (used_math()) {
if (used_math) {
/* restore fpu context if we have used it before */
own_fpu();
err |= restore_fp_context(sc);
own_fpu(0);
enable_fp_in_kernel();
if (!err)
err = check_and_restore_fp_context(sc);
disable_fp_in_kernel();
} else {
/* signal handler may have used FPU. Give it up. */
lose_fpu();
lose_fpu(0);
}
preempt_enable();
return err;
}
@ -332,6 +359,7 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct sigframe __user *frame;
sigset_t blocked;
int sig;
frame = (struct sigframe __user *) regs.regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@ -345,8 +373,11 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext(&regs, &frame->sf_sc))
sig = restore_sigcontext(&regs, &frame->sf_sc);
if (sig < 0)
goto badframe;
else if (sig)
force_sig(sig, current);
/*
* Don't let your children do this ...
@ -368,6 +399,7 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
struct rt_sigframe __user *frame;
sigset_t set;
stack_t st;
int sig;
frame = (struct rt_sigframe __user *) regs.regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@ -381,8 +413,11 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext))
sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
goto badframe;
else if (sig)
force_sig(sig, current);
if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st)))
goto badframe;
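
Illustrative aside, not part of the commit: with this change restore_sigcontext() has a three-way return. A negative value means the signal frame itself was bad, a positive value is a signal (SIGFPE) that must be forced because the handler left FPU exception cause bits set, and zero is plain success. The "err ?: sig" in fpcsr_pending() is GCC shorthand for "err ? err : sig". A small stand-alone sketch of that convention, with the inputs faked:

#include <signal.h>
#include <stdio.h>

/* Mimics the shape of fpcsr_pending(): <0 fault, >0 signal to force, 0 nothing. */
static int pending(int user_access_failed, int exceptions_pending)
{
    int err = user_access_failed ? -14 : 0;    /* -EFAULT from __get_user/__put_user */
    int sig = exceptions_pending ? SIGFPE : 0;

    return err ?: sig;                         /* GCC extension: err ? err : sig */
}

int main(void)
{
    int cases[][2] = { { 0, 0 }, { 0, 1 }, { 1, 0 } };

    for (int i = 0; i < 3; i++) {
        int r = pending(cases[i][0], cases[i][1]);

        if (r < 0)
            printf("case %d: bad frame\n", i);
        else if (r)
            printf("case %d: force signal %d\n", i, r);
        else
            printf("case %d: ok\n", i);
    }
    return 0;
}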

View File

@ -181,6 +181,7 @@ static int setup_sigcontext32(struct pt_regs *regs,
{
int err = 0;
int i;
u32 used_math;
err |= __put_user(regs->cp0_epc, &sc->sc_pc);
@ -200,26 +201,34 @@ static int setup_sigcontext32(struct pt_regs *regs,
err |= __put_user(mflo3(), &sc->sc_lo3);
}
err |= __put_user(!!used_math(), &sc->sc_used_math);
used_math = !!used_math();
err |= __put_user(used_math, &sc->sc_used_math);
if (used_math()) {
if (used_math) {
/*
* Save FPU state to signal context. Signal handler
* will "inherit" current FPU state.
*/
preempt_disable();
if (!is_fpu_owner()) {
own_fpu();
restore_fp(current);
}
own_fpu(1);
enable_fp_in_kernel();
err |= save_fp_context32(sc);
preempt_enable();
disable_fp_in_kernel();
}
return err;
}
static int
check_and_restore_fp_context32(struct sigcontext32 __user *sc)
{
int err, sig;
err = sig = fpcsr_pending(&sc->sc_fpc_csr);
if (err > 0)
err = 0;
err |= restore_fp_context32(sc);
return err ?: sig;
}
static int restore_sigcontext32(struct pt_regs *regs,
struct sigcontext32 __user *sc)
{
@ -250,19 +259,18 @@ static int restore_sigcontext32(struct pt_regs *regs,
err |= __get_user(used_math, &sc->sc_used_math);
conditional_used_math(used_math);
preempt_disable();
if (used_math()) {
if (used_math) {
/* restore fpu context if we have used it before */
own_fpu();
err |= restore_fp_context32(sc);
own_fpu(0);
enable_fp_in_kernel();
if (!err)
err = check_and_restore_fp_context32(sc);
disable_fp_in_kernel();
} else {
/* signal handler may have used FPU. Give it up. */
lose_fpu();
lose_fpu(0);
}
preempt_enable();
return err;
}
@ -508,6 +516,7 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct sigframe32 __user *frame;
sigset_t blocked;
int sig;
frame = (struct sigframe32 __user *) regs.regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@ -521,8 +530,11 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext32(&regs, &frame->sf_sc))
sig = restore_sigcontext32(&regs, &frame->sf_sc);
if (sig < 0)
goto badframe;
else if (sig)
force_sig(sig, current);
/*
* Don't let your children do this ...
@ -545,6 +557,7 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
sigset_t set;
stack_t st;
s32 sp;
int sig;
frame = (struct rt_sigframe32 __user *) regs.regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@ -558,8 +571,11 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext))
sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
goto badframe;
else if (sig)
force_sig(sig, current);
/* The ucontext contains a stack32_t, so we must convert! */
if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp))

View File

@ -127,6 +127,7 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
sigset_t set;
stack_t st;
s32 sp;
int sig;
frame = (struct rt_sigframe_n32 __user *) regs.regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@ -140,8 +141,11 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext))
sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
goto badframe;
else if (sig)
force_sig(sig, current);
/* The ucontext contains a stack32_t, so we must convert! */
if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp))

View File

@ -610,16 +610,6 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
if (fcr31 & FPU_CSR_UNI_X) {
int sig;
preempt_disable();
#ifdef CONFIG_PREEMPT
if (!is_fpu_owner()) {
/* We might lose fpu before disabling preempt... */
own_fpu();
BUG_ON(!used_math());
restore_fp(current);
}
#endif
/*
* Unimplemented operation exception. If we've got the full
* software emulator on-board, let's use it...
@ -630,18 +620,12 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
* register operands before invoking the emulator, which seems
* a bit extreme for what should be an infrequent event.
*/
save_fp(current);
/* Ensure 'resume' not overwrite saved fp context again. */
lose_fpu();
preempt_enable();
lose_fpu(1);
/* Run the emulator */
sig = fpu_emulator_cop1Handler (regs, &current->thread.fpu, 1);
preempt_disable();
own_fpu(); /* Using the FPU again. */
/*
* We can't allow the emulated instruction to leave any of
* the cause bit set in $fcr31.
@ -649,9 +633,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
/* Restore the hardware register state */
restore_fp(current);
preempt_enable();
own_fpu(1); /* Using the FPU again. */
/* If something went wrong, signal */
if (sig)
@ -775,12 +757,11 @@ asmlinkage void do_cpu(struct pt_regs *regs)
{
unsigned int cpid;
die_if_kernel("do_cpu invoked from kernel context!", regs);
cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
switch (cpid) {
case 0:
die_if_kernel("do_cpu invoked from kernel context!", regs);
if (!cpu_has_llsc)
if (!simulate_llsc(regs))
return;
@ -791,21 +772,30 @@ asmlinkage void do_cpu(struct pt_regs *regs)
break;
case 1:
preempt_disable();
own_fpu();
if (used_math()) { /* Using the FPU again. */
restore_fp(current);
} else { /* First time FPU user. */
if (!test_thread_flag(TIF_ALLOW_FP_IN_KERNEL))
die_if_kernel("do_cpu invoked from kernel context!",
regs);
if (used_math()) /* Using the FPU again. */
own_fpu(1);
else { /* First time FPU user. */
init_fpu();
set_used_math();
}
if (cpu_has_fpu) {
preempt_enable();
if (raw_cpu_has_fpu) {
if (test_thread_flag(TIF_ALLOW_FP_IN_KERNEL)) {
local_irq_disable();
if (cpu_has_fpu)
regs->cp0_status |= ST0_CU1;
/*
* We must return without enabling
* interrupts to ensure keep FPU
* ownership until resume.
*/
return;
}
} else {
int sig;
preempt_enable();
sig = fpu_emulator_cop1Handler(regs,
&current->thread.fpu, 0);
if (sig)
@ -1259,26 +1249,26 @@ static inline void mips_srs_init(void)
/*
* This is used by native signal handling
*/
asmlinkage int (*save_fp_context)(struct sigcontext *sc);
asmlinkage int (*restore_fp_context)(struct sigcontext *sc);
asmlinkage int (*save_fp_context)(struct sigcontext __user *sc);
asmlinkage int (*restore_fp_context)(struct sigcontext __user *sc);
extern asmlinkage int _save_fp_context(struct sigcontext *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext *sc);
extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);
extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);
#ifdef CONFIG_SMP
static int smp_save_fp_context(struct sigcontext *sc)
static int smp_save_fp_context(struct sigcontext __user *sc)
{
return cpu_has_fpu
return raw_cpu_has_fpu
? _save_fp_context(sc)
: fpu_emulator_save_context(sc);
}
static int smp_restore_fp_context(struct sigcontext *sc)
static int smp_restore_fp_context(struct sigcontext __user *sc)
{
return cpu_has_fpu
return raw_cpu_has_fpu
? _restore_fp_context(sc)
: fpu_emulator_restore_context(sc);
}
@ -1306,14 +1296,14 @@ static inline void signal_init(void)
/*
* This is used by 32-bit signal stuff on the 64-bit kernel
*/
asmlinkage int (*save_fp_context32)(struct sigcontext32 *sc);
asmlinkage int (*restore_fp_context32)(struct sigcontext32 *sc);
asmlinkage int (*save_fp_context32)(struct sigcontext32 __user *sc);
asmlinkage int (*restore_fp_context32)(struct sigcontext32 __user *sc);
extern asmlinkage int _save_fp_context32(struct sigcontext32 *sc);
extern asmlinkage int _restore_fp_context32(struct sigcontext32 *sc);
extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 *sc);
extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 *sc);
extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc);
extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc);
static inline void signal32_init(void)
{

View File

@ -51,7 +51,7 @@ void fpu_emulator_init_fpu(void)
* with appropriate macros from uaccess.h
*/
int fpu_emulator_save_context(struct sigcontext *sc)
int fpu_emulator_save_context(struct sigcontext __user *sc)
{
int i;
int err = 0;
@ -65,7 +65,7 @@ int fpu_emulator_save_context(struct sigcontext *sc)
return err;
}
int fpu_emulator_restore_context(struct sigcontext *sc)
int fpu_emulator_restore_context(struct sigcontext __user *sc)
{
int i;
int err = 0;
@ -84,7 +84,7 @@ int fpu_emulator_restore_context(struct sigcontext *sc)
* This is the o32 version
*/
int fpu_emulator_save_context32(struct sigcontext32 *sc)
int fpu_emulator_save_context32(struct sigcontext32 __user *sc)
{
int i;
int err = 0;
@ -98,7 +98,7 @@ int fpu_emulator_save_context32(struct sigcontext32 *sc)
return err;
}
int fpu_emulator_restore_context32(struct sigcontext32 *sc)
int fpu_emulator_restore_context32(struct sigcontext32 __user *sc)
{
int i;
int err = 0;

View File

@ -11,9 +11,6 @@
* March 2001: Ported from 2.0.34 by Liam Davies
*
*/
#define RTC_IO_EXTENT 0x10 /*Only really two ports, but... */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
@ -32,8 +29,6 @@
#include "lcd.h"
static DEFINE_SPINLOCK(lcd_lock);
static int lcd_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg);

View File

@ -79,9 +79,9 @@ static __inline__ void atomic_add(int i, atomic_t * v)
} else {
unsigned long flags;
local_irq_save(flags);
raw_local_irq_save(flags);
v->counter += i;
local_irq_restore(flags);
raw_local_irq_restore(flags);
}
}
@ -124,9 +124,9 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
} else {
unsigned long flags;
local_irq_save(flags);
raw_local_irq_save(flags);
v->counter -= i;
local_irq_restore(flags);
raw_local_irq_restore(flags);
}
}
@ -173,11 +173,11 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
} else {
unsigned long flags;
local_irq_save(flags);
raw_local_irq_save(flags);
result = v->counter;
result += i;
v->counter = result;
local_irq_restore(flags);
raw_local_irq_restore(flags);
}
smp_mb();
@ -225,11 +225,11 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
} else {
unsigned long flags;
local_irq_save(flags);
raw_local_irq_save(flags);
result = v->counter;
result -= i;
v->counter = result;
local_irq_restore(flags);
raw_local_irq_restore(flags);
}
smp_mb();
@ -293,12 +293,12 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
} else {
unsigned long flags;
local_irq_save(flags);
raw_local_irq_save(flags);
result = v->counter;
result -= i;
if (result >= 0)
v->counter = result;
local_irq_restore(flags);
raw_local_irq_restore(flags);
}
smp_mb();
@ -454,9 +454,9 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
} else {
unsigned long flags;
local_irq_save(flags);
raw_local_irq_save(flags);
v->counter += i;
local_irq_restore(flags);
raw_local_irq_restore(flags);
}
}
@ -499,9 +499,9 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
} else {
unsigned long flags;
local_irq_save(flags);
raw_local_irq_save(flags);
v->counter -= i;
local_irq_restore(flags);
raw_local_irq_restore(flags);
}
}
@ -548,11 +548,11 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
} else {
unsigned long flags;
local_irq_save(flags);
raw_local_irq_save(flags);
result = v->counter;
result += i;
v->counter = result;
local_irq_restore(flags);
raw_local_irq_restore(flags);
}
smp_mb();
@ -600,11 +600,11 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
} else {
unsigned long flags;
local_irq_save(flags);
raw_local_irq_save(flags);
result = v->counter;
result -= i;
v->counter = result;
local_irq_restore(flags);
raw_local_irq_restore(flags);
}
smp_mb();
@ -668,12 +668,12 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
} else {
unsigned long flags;
local_irq_save(flags);
raw_local_irq_save(flags);
result = v->counter;
result -= i;
if (result >= 0)
v->counter = result;
local_irq_restore(flags);
raw_local_irq_restore(flags);
}
smp_mb();

View File

@ -100,9 +100,9 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
local_irq_save(flags);
raw_local_irq_save(flags);
*a |= mask;
local_irq_restore(flags);
raw_local_irq_restore(flags);
}
}
@ -165,9 +165,9 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
local_irq_save(flags);
raw_local_irq_save(flags);
*a &= ~mask;
local_irq_restore(flags);
raw_local_irq_restore(flags);
}
}
@ -220,9 +220,9 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
local_irq_save(flags);
raw_local_irq_save(flags);
*a ^= mask;
local_irq_restore(flags);
raw_local_irq_restore(flags);
}
}
@ -287,10 +287,10 @@ static inline int test_and_set_bit(unsigned long nr,
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
local_irq_save(flags);
raw_local_irq_save(flags);
retval = (mask & *a) != 0;
*a |= mask;
local_irq_restore(flags);
raw_local_irq_restore(flags);
return retval;
}
@ -381,10 +381,10 @@ static inline int test_and_clear_bit(unsigned long nr,
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
local_irq_save(flags);
raw_local_irq_save(flags);
retval = (mask & *a) != 0;
*a &= ~mask;
local_irq_restore(flags);
raw_local_irq_restore(flags);
return retval;
}
@ -452,10 +452,10 @@ static inline int test_and_change_bit(unsigned long nr,
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
local_irq_save(flags);
raw_local_irq_save(flags);
retval = (mask & *a) != 0;
*a ^= mask;
local_irq_restore(flags);
raw_local_irq_restore(flags);
return retval;
}

View File

@ -40,6 +40,9 @@
#endif
#ifndef cpu_has_fpu
#define cpu_has_fpu (current_cpu_data.options & MIPS_CPU_FPU)
#define raw_cpu_has_fpu (raw_current_cpu_data.options & MIPS_CPU_FPU)
#else
#define raw_cpu_has_fpu cpu_has_fpu
#endif
#ifndef cpu_has_32fpr
#define cpu_has_32fpr (cpu_data[0].options & MIPS_CPU_32FPR)

View File

@ -87,6 +87,7 @@ struct cpuinfo_mips {
extern struct cpuinfo_mips cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#define raw_current_cpu_data cpu_data[raw_smp_processor_id()]
extern void cpu_probe(void);
extern void cpu_report(void);

View File

@ -27,11 +27,11 @@
struct sigcontext;
struct sigcontext32;
extern asmlinkage int (*save_fp_context)(struct sigcontext *sc);
extern asmlinkage int (*restore_fp_context)(struct sigcontext *sc);
extern asmlinkage int (*save_fp_context)(struct sigcontext __user *sc);
extern asmlinkage int (*restore_fp_context)(struct sigcontext __user *sc);
extern asmlinkage int (*save_fp_context32)(struct sigcontext32 *sc);
extern asmlinkage int (*restore_fp_context32)(struct sigcontext32 *sc);
extern asmlinkage int (*save_fp_context32)(struct sigcontext32 __user *sc);
extern asmlinkage int (*restore_fp_context32)(struct sigcontext32 __user *sc);
extern void fpu_emulator_init_fpu(void);
extern void _init_fpu(void);
@ -68,6 +68,8 @@ do { \
/* We don't care about the c0 hazard here */ \
} while (0)
#define __fpu_enabled() (read_c0_status() & ST0_CU1)
#define enable_fpu() \
do { \
if (cpu_has_fpu) \
@ -93,31 +95,47 @@ static inline int is_fpu_owner(void)
return cpu_has_fpu && __is_fpu_owner();
}
static inline void own_fpu(void)
static inline void __own_fpu(void)
{
if (cpu_has_fpu) {
__enable_fpu();
KSTK_STATUS(current) |= ST0_CU1;
set_thread_flag(TIF_USEDFPU);
}
__enable_fpu();
KSTK_STATUS(current) |= ST0_CU1;
set_thread_flag(TIF_USEDFPU);
}
static inline void lose_fpu(void)
static inline void own_fpu(int restore)
{
if (cpu_has_fpu) {
preempt_disable();
if (cpu_has_fpu && !__is_fpu_owner()) {
__own_fpu();
if (restore)
_restore_fp(current);
}
preempt_enable();
}
static inline void lose_fpu(int save)
{
preempt_disable();
if (is_fpu_owner()) {
if (save)
_save_fp(current);
KSTK_STATUS(current) &= ~ST0_CU1;
clear_thread_flag(TIF_USEDFPU);
__disable_fpu();
}
preempt_enable();
}
static inline void init_fpu(void)
{
preempt_disable();
if (cpu_has_fpu) {
__own_fpu();
_init_fpu();
} else {
fpu_emulator_init_fpu();
}
preempt_enable();
}
static inline void save_fp(struct task_struct *tsk)
@ -144,4 +162,18 @@ static inline fpureg_t *get_fpu_regs(struct task_struct *tsk)
return tsk->thread.fpu.fpr;
}
static inline void enable_fp_in_kernel(void)
{
set_thread_flag(TIF_ALLOW_FP_IN_KERNEL);
/* make sure CU1 and FPU ownership are consistent */
if (!__is_fpu_owner() && __fpu_enabled())
__disable_fpu();
}
static inline void disable_fp_in_kernel(void)
{
BUG_ON(!__is_fpu_owner() && __fpu_enabled());
clear_thread_flag(TIF_ALLOW_FP_IN_KERNEL);
}
#endif /* _ASM_FPU_H */
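
Illustrative aside, not part of the commit: own_fpu() and lose_fpu() now take a flag saying whether the task's FP context should be restored or saved while ownership changes hands, and both wrap the work in preempt_disable()/preempt_enable() so the decision cannot be invalidated by a context switch. A toy user-space model of that lazy-ownership discipline, where a single int stands in for the FPU register file and every name is invented for the sketch:

#include <stdio.h>

struct task { const char *name; int saved_fp; };

static struct task *fpu_owner;    /* whose state is currently live in the "FPU" */
static int fpu_reg;               /* stands in for the hardware FP registers */

static void own_fpu(struct task *t, int restore)
{
    /* the kernel version brackets this with preempt_disable()/preempt_enable() */
    if (fpu_owner != t) {
        fpu_owner = t;
        if (restore)
            fpu_reg = t->saved_fp;        /* _restore_fp(current) */
    }
}

static void lose_fpu(struct task *t, int save)
{
    if (fpu_owner == t) {
        if (save)
            t->saved_fp = fpu_reg;        /* _save_fp(current) */
        fpu_owner = NULL;
    }
}

int main(void)
{
    struct task a = { "a", 1 }, b = { "b", 2 };

    own_fpu(&a, 1);        /* a's context becomes live */
    fpu_reg += 10;         /* a does some FP work */
    lose_fpu(&a, 1);       /* e.g. before handing an op to the emulator */
    own_fpu(&b, 1);

    printf("a saved %d, live register now %d (owner %s)\n",
           a.saved_fp, fpu_reg, fpu_owner->name);
    return 0;
}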

View File

@ -18,7 +18,8 @@
struct device;
static dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
size_t size)
{
dma_addr_t pa = dev_to_baddr(dev, virt_to_phys(addr));
@ -37,7 +38,7 @@ static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
return dma_addr & (0xffUL << 56);
}
static void plat_unmap_dma_mem(dma_addr_t dma_addr)
static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
{
}

View File

@ -26,7 +26,8 @@ struct device;
#define RAM_OFFSET_MASK 0x3fffffffUL
static dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
size_t size)
{
dma_addr_t pa = virt_to_phys(addr) & RAM_OFFSET_MASK;
@ -59,7 +60,7 @@ static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
return addr;
}
static void plat_unmap_dma_mem(dma_addr_t dma_addr)
static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
{
}

View File

@ -23,8 +23,8 @@
extern int rtlx_open(int index, int can_sleep);
extern int rtlx_release(int index);
extern ssize_t rtlx_read(int index, void *buff, size_t count, int user);
extern ssize_t rtlx_write(int index, void *buffer, size_t count, int user);
extern ssize_t rtlx_read(int index, void __user *buff, size_t count);
extern ssize_t rtlx_write(int index, const void __user *buffer, size_t count);
extern unsigned int rtlx_read_poll(int index, int can_sleep);
extern unsigned int rtlx_write_poll(int index);

View File

@ -121,10 +121,10 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
} else {
unsigned long flags;
local_irq_save(flags);
raw_local_irq_save(flags);
retval = *m;
*m = val;
local_irq_restore(flags); /* implies memory barrier */
raw_local_irq_restore(flags); /* implies memory barrier */
}
smp_mb();
@ -169,10 +169,10 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
} else {
unsigned long flags;
local_irq_save(flags);
raw_local_irq_save(flags);
retval = *m;
*m = val;
local_irq_restore(flags); /* implies memory barrier */
raw_local_irq_restore(flags); /* implies memory barrier */
}
smp_mb();
@ -250,11 +250,11 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
} else {
unsigned long flags;
local_irq_save(flags);
raw_local_irq_save(flags);
retval = *m;
if (retval == old)
*m = new;
local_irq_restore(flags); /* implies memory barrier */
raw_local_irq_restore(flags); /* implies memory barrier */
}
smp_mb();
@ -304,11 +304,11 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
} else {
unsigned long flags;
local_irq_save(flags);
raw_local_irq_save(flags);
retval = *m;
if (retval == old)
*m = new;
local_irq_restore(flags); /* implies memory barrier */
raw_local_irq_restore(flags); /* implies memory barrier */
}
smp_mb();

View File

@ -119,6 +119,7 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 18
#define TIF_FREEZE 19
#define TIF_ALLOW_FP_IN_KERNEL 20
#define TIF_SYSCALL_TRACE 31 /* syscall trace active */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)