[XFS] Kill direct access to ->count in valusema(); all we ever use it for
is to check if the semaphore is actually locked, which can be trivially done
in a portable way. Code gets more readable, while we are at it...

SGI-PV: 953915
SGI-Modid: xfs-linux-melb:xfs-kern:26274a

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Nathan Scott <nathans@sgi.com>
commit 0d8fee3270
parent a805bad5da
@@ -34,20 +34,21 @@ typedef struct semaphore sema_t;
 #define initnsema(sp, val, name)	sema_init(sp, val)
 #define psema(sp, b)			down(sp)
 #define vsema(sp)			up(sp)
-#define valusema(sp)			(atomic_read(&(sp)->count))
-#define freesema(sema)
+#define freesema(sema)			do { } while (0)
+
+static inline int issemalocked(sema_t *sp)
+{
+	return down_trylock(sp) || (up(sp), 0);
+}
 
 /*
  * Map cpsema (try to get the sema) to down_trylock. We need to switch
  * the return values since cpsema returns 1 (acquired) 0 (failed) and
  * down_trylock returns the reverse 0 (acquired) 1 (failed).
  */
-
-#define cpsema(sp)			(down_trylock(sp) ? 0 : 1)
-
-/*
- * Didn't do cvsema(sp). Not sure how to map this to up/down/...
- * It does a vsema if the values is < 0 other wise nothing.
- */
+static inline int cpsema(sema_t *sp)
+{
+	return down_trylock(sp) ? 0 : 1;
+}
 
 #endif /* __XFS_SUPPORT_SEMA_H__ */
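For reference, a minimal sketch (not part of the patch) spelling out what the
issemalocked() one-liner above does, assuming the down_trylock()/up() semantics
described in the comment: down_trylock() returns 0 when it acquires the
semaphore and non-zero when it is already held. The comma operator in the
original collapses the "release it again and report unlocked" branch into a
single expression. The helper name below is made up for illustration.

static inline int issemalocked_spelled_out(sema_t *sp)
{
	if (down_trylock(sp))	/* could not take it: somebody holds it */
		return 1;
	up(sp);			/* we briefly acquired it; give it back */
	return 0;		/* it was not locked */
}

Either way this is only a point-in-time check; the callers converted below use
it for ASSERT()s and heuristics, not for synchronisation.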
@@ -119,7 +119,7 @@ XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp)
  */
 #define xfs_dqflock(dqp)	 { psema(&((dqp)->q_flock), PINOD | PRECALC);\
 				   (dqp)->dq_flags |= XFS_DQ_FLOCKED; }
-#define xfs_dqfunlock(dqp)	 { ASSERT(valusema(&((dqp)->q_flock)) <= 0); \
+#define xfs_dqfunlock(dqp)	 { ASSERT(issemalocked(&((dqp)->q_flock))); \
 				   vsema(&((dqp)->q_flock)); \
 				   (dqp)->dq_flags &= ~(XFS_DQ_FLOCKED); }
 
@@ -128,7 +128,7 @@ XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp)
 #define XFS_DQ_PINUNLOCK(dqp, s)   mutex_spinunlock( \
 				     &(XFS_DQ_TO_QINF(dqp)->qi_pinlock), s)
 
-#define XFS_DQ_IS_FLUSH_LOCKED(dqp) (valusema(&((dqp)->q_flock)) <= 0)
+#define XFS_DQ_IS_FLUSH_LOCKED(dqp) (issemalocked(&((dqp)->q_flock)))
 #define XFS_DQ_IS_ON_FREELIST(dqp)  ((dqp)->dq_flnext != (dqp))
 #define XFS_DQ_IS_DIRTY(dqp)	    ((dqp)->dq_flags & XFS_DQ_DIRTY)
 #define XFS_QM_ISUDQ(dqp)	    ((dqp)->dq_flags & XFS_DQ_USER)
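Purely for illustration (not code from the commit), this is how the converted
flush-lock checks read in terms of the wrappers defined in sema.h; the local
variable, the initial count of 1, and the name string are assumptions made for
the example, not taken from XFS:

	sema_t flock;

	initnsema(&flock, 1, "flock");	/* assumed mutex-style init: unlocked */
	ASSERT(!issemalocked(&flock));	/* old form: valusema(&flock) > 0     */

	psema(&flock, PINOD | PRECALC);	/* take the flush lock                */
	ASSERT(issemalocked(&flock));	/* old form: valusema(&flock) <= 0    */

	vsema(&flock);			/* release it                         */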
@@ -248,7 +248,7 @@ xfs_qm_dquot_logitem_pushbuf(
 	 * inode flush completed and the inode was taken off the AIL.
 	 * So, just get out.
 	 */
-	if ((valusema(&(dqp->q_flock)) > 0) ||
+	if (!issemalocked(&(dqp->q_flock)) ||
 	    ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) {
 		qip->qli_pushbuf_flag = 0;
 		xfs_dqunlock(dqp);
@@ -261,7 +261,7 @@ xfs_qm_dquot_logitem_pushbuf(
 	if (bp != NULL) {
 		if (XFS_BUF_ISDELAYWRITE(bp)) {
 			dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
-				  (valusema(&(dqp->q_flock)) <= 0));
+				  issemalocked(&(dqp->q_flock)));
 			qip->qli_pushbuf_flag = 0;
 			xfs_dqunlock(dqp);
 
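In the pushbuf code above the old tests went both ways: valusema() > 0 meant
the flush lock is not held (hence the negated helper in the first hunk), while
valusema() <= 0 meant it is held (the dopush test, and the ASSERT()s in the
other files). The mapping applied mechanically throughout the patch is, on the
implementations XFS relied on where a held semaphore has a count of zero or
less (exactly the non-portable assumption the patch removes):

/*
 * Old, count-peeking test		New, portable test
 *
 *	valusema(sp) <= 0	->	issemalocked(sp)	(lock held)
 *	valusema(sp) >  0	->	!issemalocked(sp)	(lock free)
 */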
@@ -1031,6 +1031,6 @@ xfs_iflock_nowait(xfs_inode_t *ip)
 void
 xfs_ifunlock(xfs_inode_t *ip)
 {
-	ASSERT(valusema(&(ip->i_flock)) <= 0);
+	ASSERT(issemalocked(&(ip->i_flock)));
 	vsema(&(ip->i_flock));
 }
@@ -3015,7 +3015,7 @@ xfs_iflush(
 	XFS_STATS_INC(xs_iflush_count);
 
 	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
-	ASSERT(valusema(&ip->i_flock) <= 0);
+	ASSERT(issemalocked(&(ip->i_flock)));
 	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
 	       ip->i_d.di_nextents > ip->i_df.if_ext_max);
 
@@ -3273,7 +3273,7 @@ xfs_iflush_int(
 	SPLDECL(s);
 
 	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
-	ASSERT(valusema(&ip->i_flock) <= 0);
+	ASSERT(issemalocked(&(ip->i_flock)));
 	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
 	       ip->i_d.di_nextents > ip->i_df.if_ext_max);
 
@@ -794,7 +794,7 @@ xfs_inode_item_pushbuf(
 	 * inode flush completed and the inode was taken off the AIL.
 	 * So, just get out.
 	 */
-	if ((valusema(&(ip->i_flock)) > 0) ||
+	if (!issemalocked(&(ip->i_flock)) ||
 	    ((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) {
 		iip->ili_pushbuf_flag = 0;
 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
@@ -816,7 +816,7 @@ xfs_inode_item_pushbuf(
 	 * If not, we can flush it async.
 	 */
 	dopush = ((iip->ili_item.li_flags & XFS_LI_IN_AIL) &&
-		  (valusema(&(ip->i_flock)) <= 0));
+		  issemalocked(&(ip->i_flock)));
 	iip->ili_pushbuf_flag = 0;
 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 	xfs_buftrace("INODE ITEM PUSH", bp);
@@ -864,7 +864,7 @@ xfs_inode_item_push(
 	ip = iip->ili_inode;
 
 	ASSERT(ismrlocked(&(ip->i_lock), MR_ACCESS));
-	ASSERT(valusema(&(ip->i_flock)) <= 0);
+	ASSERT(issemalocked(&(ip->i_flock)));
 	/*
 	 * Since we were able to lock the inode's flush lock and
 	 * we found it on the AIL, the inode must be dirty. This