JFS: One bug fix and some code cleanup
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEIodevzQLVs53l6BhNqiEXrVAjGQFAmKQ6TQACgkQNqiEXrVA
jGQJBg/+Pm8HDYVKueUg2ZfGWfyFyYPvFkpthTD/sErTFmqjTYvVrEGVGxnHZPG1
4NLXsgsNOGh1HHWGO8UMlKCTLW3nEfzn/PZ6lH9AFb0tCE0jdE/K/9iMbLr5rkZM
CTCnj3xC0/2tS1deX+9KfbOYvhk1sXPoazhHXl3QQ0TyFKSRHeZdnPRWhJsHtrsH
S30WIaYvuyiMw+0grlV3dPVq+Cj49fRX4k0ipr7JVQoPEUoahCp7h5i6Fxk1PRYZ
2P2iF9zFzMjRjPrj86pDQNI9GxzOsmKIa9f0n/C97wyI8HDNj39kfNriRPvjbJ/D
k6j8ReddSxc61368tiOASA9j8bORb7aRFsKQ3kPkRHZi/TF4l62s4jSr2wfvSHvV
uH3wIfZ49uRHyWwDcuvWguKd3w3Zx3hVahs0SQSZm1j7GxmCGxT9E4BrRNe9oTyl
Th3c6pZaDImJ8JmewqGz+yBfMGMhBpXaKPQuHaqKrNtFfkNEyp/PKst+V8OdS5+v
8FQaR6hfJpWyN00LJq87NX5rv0Uq+CI1UaEEw9ks+brY5xoGZkKk/Cmxeh70otyz
eRVfm6xzwBMZcfEuEQ5wH/BdBtbMKIo6O04q5ity+c75igvIw8H8n+M+v5rOaw/l
puLOCplWdvVnbHabHeg7y0OyiNx0WagdW8q8ACLMl1TELl/tiAE=
=KcwK
-----END PGP SIGNATURE-----

Merge tag 'jfs-5.19' of https://github.com/kleikamp/linux-shaggy

Pull jfs updates from David Kleikamp:
 "One bug fix and some code cleanup"

* tag 'jfs-5.19' of https://github.com/kleikamp/linux-shaggy:
  fs/jfs: Remove dead code
  fs: jfs: fix possible NULL pointer dereference in dbFree()
commit aef1ff1592
@@ -13,5 +13,3 @@ jfs-y := super.o file.o inode.o namei.o jfs_mount.o jfs_umount.o \
	    resize.o xattr.o ioctl.o

jfs-$(CONFIG_JFS_POSIX_ACL) += acl.o

ccflags-y := -D_JFS_4K
@@ -224,18 +224,9 @@ int jfs_get_block(struct inode *ip, sector_t lblock,
			 * this as a hole
			 */
			goto unlock;
#ifdef _JFS_4K
		XADoffset(&xad, lblock64);
		XADlength(&xad, xlen);
		XADaddress(&xad, xaddr);
#else				/* _JFS_4K */
		/*
		 * As long as block size = 4K, this isn't a problem.
		 * We should mark the whole page not ABNR, but how
		 * will we know to mark the other blocks BH_New?
		 */
		BUG();
#endif				/* _JFS_4K */
		rc = extRecord(ip, &xad);
		if (rc)
			goto unlock;
@@ -252,7 +243,6 @@ int jfs_get_block(struct inode *ip, sector_t lblock,
	/*
	 * Allocate a new block
	 */
#ifdef _JFS_4K
	if ((rc = extHint(ip, lblock64 << ip->i_sb->s_blocksize_bits, &xad)))
		goto unlock;
	rc = extAlloc(ip, xlen, lblock64, &xad, false);
@@ -263,14 +253,6 @@ int jfs_get_block(struct inode *ip, sector_t lblock,
	map_bh(bh_result, ip->i_sb, addressXAD(&xad));
	bh_result->b_size = lengthXAD(&xad) << ip->i_blkbits;

#else				/* _JFS_4K */
	/*
	 * We need to do whatever it takes to keep all but the last buffers
	 * in 4K pages - see jfs_write.c
	 */
	BUG();
#endif				/* _JFS_4K */

      unlock:
	/*
	 * Release lock on inode
@@ -385,7 +385,8 @@ int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
 	}

 	/* write the last buffer. */
-	write_metapage(mp);
+	if (mp)
+		write_metapage(mp);

 	IREAD_UNLOCK(ipbmap);
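The dbFree() hunk above is the single bug fix in this pull ("fs: jfs: fix possible NULL pointer dereference in dbFree()"): the final write_metapage() call is now guarded because mp can still be NULL at that point, presumably when the loop that walks the dmap pages never assigns it (for example, a zero-length request). Below is a minimal standalone sketch of the same guard pattern, assuming made-up stand-in names (jfs_metapage_t, fetch_metapage(), flush_metapage()) rather than the real JFS API:

#include <stdio.h>
#include <stddef.h>

/* Stand-ins for JFS's struct metapage and its helpers (hypothetical names). */
typedef struct { int dirty; } jfs_metapage_t;

static jfs_metapage_t *fetch_metapage(void)
{
	static jfs_metapage_t page;
	return &page;
}

static void flush_metapage(jfs_metapage_t *mp)
{
	mp->dirty = 0;	/* dereferences mp: callers must never pass NULL */
}

/* Mirrors the shape of dbFree(): mp is only assigned inside the loop. */
static void free_blocks(long nblocks)
{
	jfs_metapage_t *mp = NULL;
	long i;

	for (i = 0; i < nblocks; i++)
		mp = fetch_metapage();

	/* write the last buffer - the fix: skip it when no page was read */
	if (mp)
		flush_metapage(mp);
}

int main(void)
{
	free_blocks(0);	/* without the guard this would dereference NULL */
	free_blocks(3);
	puts("ok");
	return 0;
}

With the guard in place, the nblocks == 0 call in this sketch returns cleanly instead of crashing.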
@@ -868,74 +869,6 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
	return (rc);
}

#ifdef _NOTYET
/*
 * NAME:	dbAllocExact()
 *
 * FUNCTION:	try to allocate the requested extent;
 *
 * PARAMETERS:
 *	ip	- pointer to in-core inode;
 *	blkno	- extent address;
 *	nblocks	- extent length;
 *
 * RETURN VALUES:
 *	0	- success
 *	-ENOSPC	- insufficient disk resources
 *	-EIO	- i/o error
 */
int dbAllocExact(struct inode *ip, s64 blkno, int nblocks)
{
	int rc;
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
	struct dmap *dp;
	s64 lblkno;
	struct metapage *mp;

	IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);

	/*
	 * validate extent request:
	 *
	 * note: defragfs policy:
	 *  max 64 blocks will be moved.
	 *  allocation request size must be satisfied from a single dmap.
	 */
	if (nblocks <= 0 || nblocks > BPERDMAP || blkno >= bmp->db_mapsize) {
		IREAD_UNLOCK(ipbmap);
		return -EINVAL;
	}

	if (nblocks > ((s64) 1 << bmp->db_maxfreebud)) {
		/* the free space is no longer available */
		IREAD_UNLOCK(ipbmap);
		return -ENOSPC;
	}

	/* read in the dmap covering the extent */
	lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
	mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
	if (mp == NULL) {
		IREAD_UNLOCK(ipbmap);
		return -EIO;
	}
	dp = (struct dmap *) mp->data;

	/* try to allocate the requested extent */
	rc = dbAllocNext(bmp, dp, blkno, nblocks);

	IREAD_UNLOCK(ipbmap);

	if (rc == 0)
		mark_metapage_dirty(mp);

	release_metapage(mp);

	return (rc);
}
#endif /* _NOTYET */

/*
 * NAME:	dbReAlloc()
 *
@@ -2423,304 +2423,6 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
	return 0;
}

#ifdef _NOTYET
/*
 * NAME:	dtRelocate()
 *
 * FUNCTION:	relocate dtpage (internal or leaf) of directory;
 *		This function is mainly used by defragfs utility.
 */
int dtRelocate(tid_t tid, struct inode *ip, s64 lmxaddr, pxd_t * opxd,
	       s64 nxaddr)
{
	[dtRelocate() body removed by this patch: it looks up the parent
	 router entry via dtSearchNode(), reads the target dtpage and any
	 siblings, relinks the siblings, rewrites the page's self PXD to the
	 new address, takes a maplock to free the source extent, and updates
	 the parent router entry with the new address.]
}

/*
 * NAME:	dtSearchNode()
 *
 * FUNCTION:	Search for an dtpage containing a specified address
 *		This function is mainly used by defragfs utility.
 *
 * NOTE:	Search result on stack, the found page is pinned at exit.
 *		The result page must be an internal dtpage.
 *		lmxaddr give the address of the left most page of the
 *		dtree level, in which the required dtpage resides.
 */
static int dtSearchNode(struct inode *ip, s64 lmxaddr, pxd_t * kpxd,
			struct btstack * btstack)
{
	[dtSearchNode() body removed: descends the dtree to the level whose
	 leftmost page is lmxaddr, then walks right through that level until
	 it finds the router entry whose PXD matches kpxd, returning it pinned
	 on btstack.]
}
#endif /* _NOTYET */

/*
 *	dtRelink()
 *
@@ -16,9 +16,6 @@
 * forward references
 */
static int extBalloc(struct inode *, s64, s64 *, s64 *);
#ifdef _NOTYET
static int extBrealloc(struct inode *, s64, s64, s64 *, s64 *);
#endif
static s64 extRoundDown(s64 nb);

#define DPD(a)          (printk("(a): %d\n",(a)))
@@ -177,162 +174,6 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
	return (0);
}


#ifdef _NOTYET
/*
 * NAME:	extRealloc()
 *
 * FUNCTION:	extend the allocation of a file extent containing a
 *		partial back last page.
 *
 * PARAMETERS:
 *	ip	- the inode of the file.
 *	cp	- cbuf for the partial backed last page.
 *	xlen	- request size of the resulting extent.
 *	xp	- pointer to an xad. on successful exit, the xad
 *		  describes the newly allocated extent.
 *	abnr	- bool indicating whether the newly allocated extent
 *		  should be marked as allocated but not recorded.
 *
 * RETURN VALUES:
 *	0	- success
 *	-EIO	- i/o error.
 *	-ENOSPC	- insufficient disk resources.
 */
int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
{
	[extRealloc() body removed by this patch: it clamps nxlen to MAXXLEN,
	 converts an allocated-but-not-recorded page if needed, calls
	 extBrealloc() and dquot_alloc_block(), then either extends the extent
	 in place via xtExtend() or relocates it via xtTailgate(), optionally
	 inserting a second extent with xtInsert(), and finally records the
	 result in *xp and marks the inode dirty.]
}
#endif /* _NOTYET */


/*
 * NAME:	extHint()
 *
@@ -423,44 +264,6 @@ int extRecord(struct inode *ip, xad_t * xp)
	return rc;
}


#ifdef _NOTYET
/*
 * NAME:	extFill()
 *
 * FUNCTION:	allocate disk space for a file page that represents
 *		a file hole.
 *
 * PARAMETERS:
 *	ip	- the inode of the file.
 *	cp	- cbuf of the file page represent the hole.
 *
 * RETURN VALUES:
 *	0	- success
 *	-EIO	- i/o error.
 *	-ENOSPC	- insufficient disk resources.
 */
int extFill(struct inode *ip, xad_t * xp)
{
	int rc, nbperpage = JFS_SBI(ip->i_sb)->nbperpage;
	s64 blkno = offsetXAD(xp) >> ip->i_blkbits;

	// assert(ISSPARSE(ip));

	/* initialize the extent allocation hint */
	XADaddress(xp, 0);

	/* allocate an extent to fill the hole */
	if ((rc = extAlloc(ip, nbperpage, blkno, xp, false)))
		return (rc);

	assert(lengthPXD(xp) == nbperpage);

	return (0);
}
#endif /* _NOTYET */


/*
 * NAME:	extBalloc()
 *
@@ -550,64 +353,6 @@ extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)
	return (0);
}


#ifdef _NOTYET
/*
 * NAME:	extBrealloc()
 *
 * FUNCTION:	attempt to extend an extent's allocation.
 *
 *	Initially, we will try to extend the extent's allocation
 *	in place.  If this fails, we'll try to move the extent
 *	to a new set of blocks.  If moving the extent, we initially
 *	will try to allocate disk blocks for the requested size
 *	(newnblks).  if this fails (new contiguous free blocks not
 *	available), we'll try to allocate a smaller number of
 *	blocks (producing a smaller extent), with this smaller
 *	number of blocks consisting of the requested number of
 *	blocks rounded down to the next smaller power of 2
 *	number (i.e. 16 -> 8).  We'll continue to round down and
 *	retry the allocation until the number of blocks to allocate
 *	is smaller than the number of blocks per page.
 *
 * PARAMETERS:
 *	ip	 - the inode of the file.
 *	blkno	 - starting block number of the extents current allocation.
 *	nblks	 - number of blocks within the extents current allocation.
 *	newnblks - pointer to a s64 value.  on entry, this value is the
 *		   new desired extent size (number of blocks).  on
 *		   successful exit, this value is set to the extent's actual
 *		   new size (new number of blocks).
 *	newblkno - the starting block number of the extents new allocation.
 *
 * RETURN VALUES:
 *	0	- success
 *	-EIO	- i/o error.
 *	-ENOSPC	- insufficient disk resources.
 */
static int
extBrealloc(struct inode *ip,
	    s64 blkno, s64 nblks, s64 * newnblks, s64 * newblkno)
{
	int rc;

	/* try to extend in place */
	if ((rc = dbExtend(ip, blkno, nblks, *newnblks - nblks)) == 0) {
		*newblkno = blkno;
		return (0);
	} else {
		if (rc != -ENOSPC)
			return (rc);
	}

	/* in place extension not possible.
	 * try to move the extent to a new set of blocks.
	 */
	return (extBalloc(ip, blkno, newnblks, newblkno));
}
#endif /* _NOTYET */


/*
 * NAME:	extRoundDown()
 *
@@ -388,14 +388,6 @@ lmWriteRecord(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
			p = (caddr_t) &JFS_IP(tlck->ip)->i_xtroot;
			linelock = (struct linelock *) & tlck->lock;
		}
#ifdef _JFS_WIP
		else if (tlck->flag & tlckINLINELOCK) {

			inlinelock = (struct inlinelock *) & tlck;
			p = (caddr_t) & inlinelock->pxd;
			linelock = (struct linelock *) & tlck;
		}
#endif /* _JFS_WIP */
		else {
			jfs_err("lmWriteRecord: UFO tlck:0x%p", tlck);
			return 0;	/* Probably should trap */
@@ -307,13 +307,11 @@ static int chkSuper(struct super_block *sb)
	}

	bsize = le32_to_cpu(j_sb->s_bsize);
#ifdef _JFS_4K
	if (bsize != PSIZE) {
		jfs_err("Currently only 4K block size supported!");
		jfs_err("Only 4K block size supported!");
		rc = -EINVAL;
		goto out;
	}
#endif /* _JFS_4K */

	jfs_info("superblock: flag:0x%08x state:0x%08x size:0x%Lx",
		 le32_to_cpu(j_sb->s_flag), le32_to_cpu(j_sb->s_state),
@@ -1490,40 +1490,6 @@ static void diLog(struct jfs_log *log, struct tblock *tblk, struct lrd *lrd,
		tlck->flag |= tlckWRITEPAGE;
	} else
		jfs_err("diLog: UFO type tlck:0x%p", tlck);
#ifdef _JFS_WIP
	/*
	 *	alloc/free external EA extent
	 *
	 * a maplock for txUpdateMap() to update bPWMAP for alloc/free
	 * of the extent has been formatted at txLock() time;
	 */
	else {
		assert(tlck->type & tlckEA);

		/* log LOG_UPDATEMAP for logredo() to update bmap for
		 * alloc of new (and free of old) external EA extent;
		 */
		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
		pxdlock = (struct pxd_lock *) & tlck->lock;
		nlock = pxdlock->index;
		for (i = 0; i < nlock; i++, pxdlock++) {
			if (pxdlock->flag & mlckALLOCPXD)
				lrd->log.updatemap.type =
				    cpu_to_le16(LOG_ALLOCPXD);
			else
				lrd->log.updatemap.type =
				    cpu_to_le16(LOG_FREEPXD);
			lrd->log.updatemap.nxd = cpu_to_le16(1);
			lrd->log.updatemap.pxd = pxdlock->pxd;
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
		}

		/* update bmap */
		tlck->flag |= tlckUPDATEMAP;
	}
#endif /* _JFS_WIP */

	return;
}

@@ -114,17 +114,6 @@ static int xtSplitPage(tid_t tid, struct inode *ip, struct xtsplit * split,
static int xtSplitRoot(tid_t tid, struct inode *ip,
		       struct xtsplit * split, struct metapage ** rmpp);

#ifdef _STILL_TO_PORT
static int xtDeleteUp(tid_t tid, struct inode *ip, struct metapage * fmp,
		      xtpage_t * fp, struct btstack * btstack);

static int xtSearchNode(struct inode *ip,
			xad_t * xad,
			int *cmpp, struct btstack * btstack, int flag);

static int xtRelink(tid_t tid, struct inode *ip, xtpage_t * fp);
#endif /* _STILL_TO_PORT */

/*
 *	xtLookup()
 *
@@ -1493,189 +1482,6 @@ int xtExtend(tid_t tid,	/* transaction id */
	return rc;
}

#ifdef _NOTYET
/*
 * xtTailgate()
 *
 * function: split existing 'tail' extent
 *	(split offset >= start offset of tail extent), and
 *	relocate and extend the split tail half;
 *
 * note: existing extent may or may not have been committed.
 * caller is responsible for pager buffer cache update, and
 * working block allocation map update;
 * update pmap: free old split tail extent, alloc new extent;
 */
int xtTailgate(tid_t tid,		/* transaction id */
	       struct inode *ip, s64 xoff,	/* split/new extent offset */
	       s32 xlen,	/* new extent length */
	       s64 xaddr,	/* new extent address */
	       int flag)
{
	[xtTailgate() body removed by this patch: it searches for the extent
	 covering xoff, takes a tlock on the leaf page, inserts a new XAD_NEW
	 entry (splitting the page via xtSplitUp() if it is full), then
	 truncates or replaces the old extent at the split offset, freeing the
	 tail blocks either through a maplock at commit or directly via
	 dbFree().]
}
#endif /* _NOTYET */

/*
 *	xtUpdate()
 *
@@ -1753,32 +1559,12 @@ int xtUpdate(tid_t tid, struct inode *ip, xad_t * nxad)
	newindex = index + 1;
	nextindex = le16_to_cpu(p->header.nextindex);

#ifdef _JFS_WIP_NOCOALESCE
	if (xoff < nxoff)
		goto updateRight;

	/*
	 * replace XAD with nXAD
	 */
      replace:			/* (nxoff == xoff) */
	if (nxlen == xlen) {
		/* replace XAD with nXAD:recorded */
		*xad = *nxad;
		xad->flag = xflag & ~XAD_NOTRECORDED;

		goto out;
	} else			/* (nxlen < xlen) */
		goto updateLeft;
#endif				/* _JFS_WIP_NOCOALESCE */

/* #ifdef _JFS_WIP_COALESCE */
	if (xoff < nxoff)
		goto coalesceRight;

	/*
	 * coalesce with left XAD
	 */
//coalesceLeft: /* (xoff == nxoff) */
	/* is XAD first entry of page ? */
	if (index == XTENTRYSTART)
		goto replace;
@@ -1897,7 +1683,6 @@ int xtUpdate(tid_t tid, struct inode *ip, xad_t * nxad)
		jfs_error(ip->i_sb, "xoff >= nxoff\n");
		return -EIO;
	}
/* #endif _JFS_WIP_COALESCE */

	/*
	 * split XAD into (lXAD, nXAD):
@@ -2305,752 +2090,6 @@ int xtAppend(tid_t tid,	/* transaction id */

	return rc;
}
#ifdef _STILL_TO_PORT

/* - TBD for defragmentaion/reorganization -
 *
 * xtDelete()
 *
 * function:
 *	delete the entry with the specified key.
 *
 *	N.B.: whole extent of the entry is assumed to be deleted.
 *
 * return:
 *	ENOENT: if the entry is not found.
 */
int xtDelete(tid_t tid, struct inode *ip, s64 xoff, s32 xlen, int flag)
{
	[xtDelete() body removed by this patch: it looks up the entry with
	 xtSearch(), deletes it from the leaf page (freeing the page via
	 xtDeleteUp() if it becomes empty), takes a tlckXTREE transaction lock
	 and compacts the remaining XADs.]
}

/* - TBD for defragmentaion/reorganization -
 *
 * xtDeleteUp()
 *
 * function:
 *	free empty pages as propagating deletion up the tree
 */
static int
xtDeleteUp(tid_t tid, struct inode *ip,
	   struct metapage * fmp, xtpage_t * fp, struct btstack * btstack)
{
	[xtDeleteUp() body removed: it keeps an emptied root as an empty leaf,
	 otherwise relinks and frees the empty page with xtRelink()/dbFree(),
	 then walks the btstack upward deleting router entries and freeing any
	 parent pages that become empty in turn.]
}

/*
 * NAME:	xtRelocate()
 *
 * FUNCTION:	relocate xtpage or data extent of regular file;
 *		This function is mainly used by defragfs utility.
 *
 * NOTE:	This routine does not have the logic to handle
 *		uncommitted allocated extent. The caller should call
 *		txCommit() to commit all the allocation before call
 *		this routine.
 */
int
xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad,	/* old XAD */
	   s64 nxaddr,		/* new xaddr */
	   int xtype)
{				/* extent type: XTPAGE or DATAEXT */
	[xtRelocate() body removed: it validates the parent xad entry for the
	 source extent, copies data pages to the new location (DATAEXT) or
	 rewrites the target xtpage's self address and sibling links (XTPAGE),
	 acquires a maplock to free the source extent at commit, and updates
	 the parent XAD with the new address.]
}

/*
 * xtSearchNode()
 *
 * function:	search for the internal xad entry covering specified extent.
 *		This function is mainly used by defragfs utility.
 */
static int xtSearchNode(struct inode *ip, xad_t * xad,	/* required XAD entry */
			int *cmpp, struct btstack * btstack, int flag)
{
	[xtSearchNode() body removed: it binary-searches each internal xtpage
	 from the root down for an entry matching the XAD's offset and
	 address, saving the result on btstack with the containing page
	 pinned.]
}

/*
 * xtRelink()
 *
 * function:
 *	link around a freed page.
 */
static int xtRelink(tid_t tid, struct inode *ip, xtpage_t * p)
{
	[xtRelink() body removed: it updates the prev pointer of the next
	 sibling and the next pointer of the previous sibling under
	 tlckXTREE | tlckRELINK transaction locks.]
}
#endif /* _STILL_TO_PORT */


/*
 *	xtInitRoot()
@@ -95,10 +95,6 @@ extern int xtInsert(tid_t tid, struct inode *ip,
		    int xflag, s64 xoff, int xlen, s64 * xaddrp, int flag);
extern int xtExtend(tid_t tid, struct inode *ip, s64 xoff, int xlen,
		    int flag);
#ifdef _NOTYET
extern int xtTailgate(tid_t tid, struct inode *ip,
		      s64 xoff, int xlen, s64 xaddr, int flag);
#endif
extern int xtUpdate(tid_t tid, struct inode *ip, struct xad *nxad);
extern int xtDelete(tid_t tid, struct inode *ip, s64 xoff, int xlen,
		    int flag);