/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * These routines make two important assumptions:
 *
 * 1. atomic_t is really an int and can be freely cast back and forth
 *    (validated in __init_atomic_per_cpu).
 *
 * 2. userspace uses sys_cmpxchg() for all atomic operations, thus using
 *    the same locking convention that all the kernel atomic routines use.
 */

#ifndef _ASM_TILE_FUTEX_H
#define _ASM_TILE_FUTEX_H

#ifndef __ASSEMBLY__

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <asm/atomic.h>

/*
 * Support macros for futex operations.  Do not use these macros directly.
 * They assume "ret", "val", "oparg", and "uaddr" in the lexical context.
 * __futex_cmpxchg() additionally assumes "oldval".
 */
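
/*
 * Illustrative sketch only: the macros below are meant to be expanded
 * in a lexical context that already declares the names they use,
 * roughly
 *
 *	int ret, val;			result code and old value
 *	u32 __user *uaddr = ...;	user-space futex word
 *	int oparg = ...;		operand for the chosen op
 *	__futex_prolog();		per-ISA setup (may declare "lock")
 *	__futex_add();			afterwards "val" holds the old
 *					*uaddr and "ret" is 0 or -EFAULT
 */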

#ifdef __tilegx__

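/*
 * The asm below issues the atomic instruction at label 1 and clears
 * "ret" in the same bundle.  The __ex_table entry (".quad 1b, 0b")
 * maps a fault at label 1 to the fixup at label 0, which sets "ret"
 * to -EFAULT and jumps past the asm.
 */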
#define __futex_asm(OP) \
	asm("1: {" #OP " %1, %3, %4; movei %0, 0 }\n"		\
	    ".pushsection .fixup,\"ax\"\n"			\
	    "0: { movei %0, %5; j 9f }\n"			\
	    ".section __ex_table,\"a\"\n"			\
	    ".align 8\n"					\
	    ".quad 1b, 0b\n"					\
	    ".popsection\n"					\
	    "9:"						\
	    : "=r" (ret), "=r" (val), "+m" (*(uaddr))		\
	    : "r" (uaddr), "r" (oparg), "i" (-EFAULT))

#define __futex_set() __futex_asm(exch4)
#define __futex_add() __futex_asm(fetchadd4)
#define __futex_or() __futex_asm(fetchor4)
#define __futex_andn() ({ oparg = ~oparg; __futex_asm(fetchand4); })
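/*
 * tilegx's cmpexch4 compares memory against the SPR_CMPEXCH_VALUE
 * special-purpose register, so "oldval" must be staged there first.
 */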
#define __futex_cmpxchg() \
	({ __insn_mtspr(SPR_CMPEXCH_VALUE, oldval); __futex_asm(cmpexch4); })

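/*
 * Unlike add/and/or, xor has no single fetch-and-op instruction to
 * lean on here, so it is synthesized from a compare-and-exchange loop
 * that retries until the cmpexch succeeds (oldval == val) or faults.
 */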
#define __futex_xor()						\
	({							\
		u32 oldval, n = oparg;				\
		if ((ret = __get_user(oldval, uaddr)) == 0) {	\
			do {					\
				oparg = oldval ^ n;		\
				__futex_cmpxchg();		\
			} while (ret == 0 && oldval != val);	\
		}						\
	})

/* No need to prefetch, since the atomic ops go to the home cache anyway. */
#define __futex_prolog()

#else

#define __futex_call(FN) \
	{ \
		struct __get_user gu = FN((u32 __force *)uaddr, lock, oparg); \
		val = gu.val; \
		ret = gu.err; \
	}

#define __futex_set() __futex_call(__atomic_xchg)
#define __futex_add() __futex_call(__atomic_xchg_add)
#define __futex_or() __futex_call(__atomic_or)
#define __futex_andn() __futex_call(__atomic_andn)
#define __futex_xor() __futex_call(__atomic_xor)

#define __futex_cmpxchg() \
	{ \
		struct __get_user gu = __atomic_cmpxchg((u32 __force *)uaddr, \
							lock, oldval, oparg); \
		val = gu.val; \
		ret = gu.err; \
	}

/*
 * Find the lock pointer for the atomic calls to use, and issue a
 * prefetch to the user address to bring it into cache.  Similar to
 * __atomic_setup(), but we can't do a read into the L1 since it might
 * fault; instead we do a prefetch into the L2.
 */
#define __futex_prolog() \
	int *lock; \
	__insn_prefetch(uaddr); \
	lock = __atomic_hashed_lock((int __force *)uaddr)
#endif

static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
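	/*
	 * Decode encoded_op (see FUTEX_OP() in <linux/futex.h>): bit 31
	 * is the FUTEX_OP_OPARG_SHIFT flag, bits 28-30 hold op, bits
	 * 24-27 hold cmp, bits 12-23 hold oparg, and bits 0-11 hold
	 * cmparg.  The shift-left/shift-right pairs sign-extend the two
	 * 12-bit immediate fields.
	 */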
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int uninitialized_var(val), ret;

	__futex_prolog();

	/* The 32-bit futex code makes this assumption, so validate it here. */
	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	pagefault_disable();
	switch (op) {
	case FUTEX_OP_SET:
		__futex_set();
		break;
	case FUTEX_OP_ADD:
		__futex_add();
		break;
	case FUTEX_OP_OR:
		__futex_or();
		break;
	case FUTEX_OP_ANDN:
		__futex_andn();
		break;
	case FUTEX_OP_XOR:
		__futex_xor();
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	pagefault_enable();

	if (!ret) {
		switch (cmp) {
		case FUTEX_OP_CMP_EQ:
			ret = (val == cmparg);
			break;
		case FUTEX_OP_CMP_NE:
			ret = (val != cmparg);
			break;
		case FUTEX_OP_CMP_LT:
			ret = (val < cmparg);
			break;
		case FUTEX_OP_CMP_GE:
			ret = (val >= cmparg);
			break;
		case FUTEX_OP_CMP_LE:
			ret = (val <= cmparg);
			break;
		case FUTEX_OP_CMP_GT:
			ret = (val > cmparg);
			break;
		default:
			ret = -ENOSYS;
		}
	}
	return ret;
}
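
/*
 * Illustrative example only: the generic futex code calls the routine
 * above to implement FUTEX_WAKE_OP.  With, say,
 * encoded_op == FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_EQ, 0) it
 * atomically adds 1 to *uaddr and returns nonzero iff the old value
 * was 0 (or a negative errno on failure).
 */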

static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
						u32 oldval, u32 oparg)
{
	int ret, val;

	__futex_prolog();

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	__futex_cmpxchg();

	*uval = val;
	return ret;
}
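
/*
 * Contract of the routine above: it returns 0 or -EFAULT, reports the
 * value actually found at uaddr through *uval, and stores "oparg" only
 * if that value equaled "oldval".
 */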

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_FUTEX_H */