JFS: Code cleanup - getting rid of never-used debug code
I'm finally getting around to cleaning out debug code that I've never used. There has always been code ifdef'ed out by _JFS_DEBUG_DMAP, _JFS_DEBUG_IMAP, _JFS_DEBUG_DTREE, and _JFS_DEBUG_XTREE, which I have personally never used, and I doubt that anyone has since the design stage back in OS/2. There is also a function, xtGather, that has never been used, and I don't know why it was ever there.

Signed-off-by: Dave Kleikamp <shaggy@austin.ibm.com>
parent f5f287738b
commit b38a3ab3d1
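The hooks being deleted follow the usual compile-time debug pattern: the controlling define is left commented out, so every DB* call site expands to nothing in a normal build and the checks never reach the compiled object. Below is a minimal, self-contained sketch of that pattern; the _JFS_DEBUG_DMAP and DBALLOC names come from the diff, while the types and example values are illustrative only and are not the kernel code itself.

#include <stdio.h>

typedef long long s64;

/* Controlling define is left commented out, as in the removed JFS code,
 * so the hook below compiles away completely. */
/* #define _JFS_DEBUG_DMAP 1 */

#ifdef _JFS_DEBUG_DMAP
#define DBALLOC(dbmap, mapsize, blkno, nblocks) \
	DBAlloc(dbmap, mapsize, blkno, nblocks)
static void DBAlloc(unsigned int *dbmap, s64 mapsize, s64 blkno, s64 nblocks)
{
	/* a real hook would cross-check an in-memory shadow of the block map */
	printf("alloc %lld blocks at %lld\n", nblocks, blkno);
}
#else
/* debug disabled: the call site becomes an empty statement */
#define DBALLOC(dbmap, mapsize, blkno, nblocks)
#endif

int main(void)
{
	unsigned int shadow[4] = { 0 };

	/* With _JFS_DEBUG_DMAP undefined this line vanishes at preprocessing
	 * time, which is why deleting the hooks cannot change normal builds. */
	DBALLOC(shadow, 128, 0, 8);

	(void) shadow;	/* silence unused warning when the hook is compiled out */
	return 0;
}

With the define commented out, the shadow-map checks have been dead code since the OS/2 design stage, so removing them (and the never-called xtGather(), which was additionally fenced off by _JFS_WIP) leaves the generated kernel unchanged.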
@@ -25,36 +25,6 @@
#include "jfs_metapage.h"
#include "jfs_debug.h"

/*
 * Debug code for double-checking block map
 */
/* #define _JFS_DEBUG_DMAP 1 */

#ifdef _JFS_DEBUG_DMAP
#define DBINITMAP(size,ipbmap,results) \
	DBinitmap(size,ipbmap,results)
#define DBALLOC(dbmap,mapsize,blkno,nblocks) \
	DBAlloc(dbmap,mapsize,blkno,nblocks)
#define DBFREE(dbmap,mapsize,blkno,nblocks) \
	DBFree(dbmap,mapsize,blkno,nblocks)
#define DBALLOCCK(dbmap,mapsize,blkno,nblocks) \
	DBAllocCK(dbmap,mapsize,blkno,nblocks)
#define DBFREECK(dbmap,mapsize,blkno,nblocks) \
	DBFreeCK(dbmap,mapsize,blkno,nblocks)

static void DBinitmap(s64, struct inode *, u32 **);
static void DBAlloc(uint *, s64, s64, s64);
static void DBFree(uint *, s64, s64, s64);
static void DBAllocCK(uint *, s64, s64, s64);
static void DBFreeCK(uint *, s64, s64, s64);
#else
#define DBINITMAP(size,ipbmap,results)
#define DBALLOC(dbmap, mapsize, blkno, nblocks)
#define DBFREE(dbmap, mapsize, blkno, nblocks)
#define DBALLOCCK(dbmap, mapsize, blkno, nblocks)
#define DBFREECK(dbmap, mapsize, blkno, nblocks)
#endif /* _JFS_DEBUG_DMAP */

/*
 * SERIALIZATION of the Block Allocation Map.
 *
@@ -242,7 +212,6 @@ int dbMount(struct inode *ipbmap)
	JFS_SBI(ipbmap->i_sb)->bmap = bmp;

	memset(bmp->db_active, 0, sizeof(bmp->db_active));
	DBINITMAP(bmp->db_mapsize, ipbmap, &bmp->db_DBmap);

	/*
	 * allocate/initialize the bmap lock
@@ -407,16 +376,12 @@ int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
		 */
		nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1)));

		DBALLOCCK(bmp->db_DBmap, bmp->db_mapsize, blkno, nb);

		/* free the blocks. */
		if ((rc = dbFreeDmap(bmp, dp, blkno, nb))) {
			release_metapage(mp);
			IREAD_UNLOCK(ipbmap);
			return (rc);
		}

		DBFREE(bmp->db_DBmap, bmp->db_mapsize, blkno, nb);
	}

	/* write the last buffer. */
@@ -775,10 +740,6 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
		IWRITE_LOCK(ipbmap);

		rc = dbAllocAny(bmp, nblocks, l2nb, results);
		if (rc == 0) {
			DBALLOC(bmp->db_DBmap, bmp->db_mapsize, *results,
				nblocks);
		}

		goto write_unlock;
	}
@@ -836,8 +797,6 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
		    != -ENOSPC) {
			if (rc == 0) {
				*results = blkno;
				DBALLOC(bmp->db_DBmap, bmp->db_mapsize,
					*results, nblocks);
				mark_metapage_dirty(mp);
			}

@@ -863,11 +822,8 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
		if ((rc =
		     dbAllocNear(bmp, dp, blkno, (int) nblocks, l2nb, results))
		    != -ENOSPC) {
			if (rc == 0) {
				DBALLOC(bmp->db_DBmap, bmp->db_mapsize,
					*results, nblocks);
			if (rc == 0)
				mark_metapage_dirty(mp);
			}

			release_metapage(mp);
			goto read_unlock;
@@ -878,11 +834,8 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
		 */
		if ((rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results))
		    != -ENOSPC) {
			if (rc == 0) {
				DBALLOC(bmp->db_DBmap, bmp->db_mapsize,
					*results, nblocks);
			if (rc == 0)
				mark_metapage_dirty(mp);
			}

			release_metapage(mp);
			goto read_unlock;
@@ -896,13 +849,9 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
	 * the same allocation group as the hint.
	 */
	IWRITE_LOCK(ipbmap);
	if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results))
	    != -ENOSPC) {
		if (rc == 0)
			DBALLOC(bmp->db_DBmap, bmp->db_mapsize,
				*results, nblocks);
	if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) != -ENOSPC)
		goto write_unlock;
	}

	IWRITE_UNLOCK(ipbmap);

@@ -918,9 +867,6 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
	 */
	if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) == -ENOSPC)
		rc = dbAllocAny(bmp, nblocks, l2nb, results);
	if (rc == 0) {
		DBALLOC(bmp->db_DBmap, bmp->db_mapsize, *results, nblocks);
	}

      write_unlock:
	IWRITE_UNLOCK(ipbmap);
@@ -992,10 +938,9 @@ int dbAllocExact(struct inode *ip, s64 blkno, int nblocks)
	IREAD_UNLOCK(ipbmap);

	if (rc == 0) {
		DBALLOC(bmp->db_DBmap, bmp->db_mapsize, blkno, nblocks);
	if (rc == 0)
		mark_metapage_dirty(mp);
	}

	release_metapage(mp);

	return (rc);
@@ -1144,7 +1089,6 @@ static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
		return -EIO;
	}

	DBALLOCCK(bmp->db_DBmap, bmp->db_mapsize, blkno, nblocks);
	dp = (struct dmap *) mp->data;

	/* try to allocate the blocks immediately following the
@@ -1155,11 +1099,9 @@ static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
	IREAD_UNLOCK(ipbmap);

	/* were we successful ? */
	if (rc == 0) {
		DBALLOC(bmp->db_DBmap, bmp->db_mapsize, extblkno,
			addnblocks);
	if (rc == 0)
		write_metapage(mp);
	} else
	else
		/* we were not successful */
		release_metapage(mp);

@@ -3185,16 +3127,12 @@ int dbAllocBottomUp(struct inode *ip, s64 blkno, s64 nblocks)
		 */
		nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1)));

		DBFREECK(bmp->db_DBmap, bmp->db_mapsize, blkno, nb);

		/* allocate the blocks. */
		if ((rc = dbAllocDmapBU(bmp, dp, blkno, nb))) {
			release_metapage(mp);
			IREAD_UNLOCK(ipbmap);
			return (rc);
		}

		DBALLOC(bmp->db_DBmap, bmp->db_mapsize, blkno, nb);
	}

	/* write the last buffer. */
@@ -4041,223 +3979,3 @@ s64 dbMapFileSizeToMapSize(struct inode * ipbmap)
	return (nblocks);
}


#ifdef _JFS_DEBUG_DMAP
/*
 * DBinitmap()
 */
static void DBinitmap(s64 size, struct inode *ipbmap, u32 ** results)
{
	int npages;
	u32 *dbmap, *d;
	int n;
	s64 lblkno, cur_block;
	struct dmap *dp;
	struct metapage *mp;

	npages = size / 32768;
	npages += (size % 32768) ? 1 : 0;

	dbmap = (u32 *) xmalloc(npages * 4096, L2PSIZE, kernel_heap);
	if (dbmap == NULL)
		BUG();	/* Not robust since this is only unused debug code */

	for (n = 0, d = dbmap; n < npages; n++, d += 1024)
		bzero(d, 4096);

	/* Need to initialize from disk map pages
	 */
	for (d = dbmap, cur_block = 0; cur_block < size;
	     cur_block += BPERDMAP, d += LPERDMAP) {
		lblkno = BLKTODMAP(cur_block,
				   JFS_SBI(ipbmap->i_sb)->bmap->
				   db_l2nbperpage);
		mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
		if (mp == NULL) {
			jfs_error(ipbmap->i_sb,
				  "DBinitmap: could not read disk map page");
			continue;
		}
		dp = (struct dmap *) mp->data;

		for (n = 0; n < LPERDMAP; n++)
			d[n] = le32_to_cpu(dp->wmap[n]);

		release_metapage(mp);
	}

	*results = dbmap;
}


/*
 * DBAlloc()
 */
void DBAlloc(uint * dbmap, s64 mapsize, s64 blkno, s64 nblocks)
{
	int word, nb, bitno;
	u32 mask;

	assert(blkno > 0 && blkno < mapsize);
	assert(nblocks > 0 && nblocks <= mapsize);

	assert(blkno + nblocks <= mapsize);

	dbmap += (blkno / 32);
	while (nblocks > 0) {
		bitno = blkno & (32 - 1);
		nb = min(nblocks, 32 - bitno);

		mask = (0xffffffff << (32 - nb) >> bitno);
		assert((mask & *dbmap) == 0);
		*dbmap |= mask;

		dbmap++;
		blkno += nb;
		nblocks -= nb;
	}
}


/*
 * DBFree()
 */
static void DBFree(uint * dbmap, s64 mapsize, s64 blkno, s64 nblocks)
{
	int word, nb, bitno;
	u32 mask;

	assert(blkno > 0 && blkno < mapsize);
	assert(nblocks > 0 && nblocks <= mapsize);

	assert(blkno + nblocks <= mapsize);

	dbmap += (blkno / 32);
	while (nblocks > 0) {
		bitno = blkno & (32 - 1);
		nb = min(nblocks, 32 - bitno);

		mask = (0xffffffff << (32 - nb) >> bitno);
		assert((mask & *dbmap) == mask);
		*dbmap &= ~mask;

		dbmap++;
		blkno += nb;
		nblocks -= nb;
	}
}


/*
 * DBAllocCK()
 */
static void DBAllocCK(uint * dbmap, s64 mapsize, s64 blkno, s64 nblocks)
{
	int word, nb, bitno;
	u32 mask;

	assert(blkno > 0 && blkno < mapsize);
	assert(nblocks > 0 && nblocks <= mapsize);

	assert(blkno + nblocks <= mapsize);

	dbmap += (blkno / 32);
	while (nblocks > 0) {
		bitno = blkno & (32 - 1);
		nb = min(nblocks, 32 - bitno);

		mask = (0xffffffff << (32 - nb) >> bitno);
		assert((mask & *dbmap) == mask);

		dbmap++;
		blkno += nb;
		nblocks -= nb;
	}
}


/*
 * DBFreeCK()
 */
static void DBFreeCK(uint * dbmap, s64 mapsize, s64 blkno, s64 nblocks)
{
	int word, nb, bitno;
	u32 mask;

	assert(blkno > 0 && blkno < mapsize);
	assert(nblocks > 0 && nblocks <= mapsize);

	assert(blkno + nblocks <= mapsize);

	dbmap += (blkno / 32);
	while (nblocks > 0) {
		bitno = blkno & (32 - 1);
		nb = min(nblocks, 32 - bitno);

		mask = (0xffffffff << (32 - nb) >> bitno);
		assert((mask & *dbmap) == 0);

		dbmap++;
		blkno += nb;
		nblocks -= nb;
	}
}


/*
 * dbPrtMap()
 */
static void dbPrtMap(struct bmap * bmp)
{
	printk(" mapsize: %d%d\n", bmp->db_mapsize);
	printk(" nfree: %d%d\n", bmp->db_nfree);
	printk(" numag: %d\n", bmp->db_numag);
	printk(" agsize: %d%d\n", bmp->db_agsize);
	printk(" agl2size: %d\n", bmp->db_agl2size);
	printk(" agwidth: %d\n", bmp->db_agwidth);
	printk(" agstart: %d\n", bmp->db_agstart);
	printk(" agheigth: %d\n", bmp->db_agheigth);
	printk(" aglevel: %d\n", bmp->db_aglevel);
	printk(" maxlevel: %d\n", bmp->db_maxlevel);
	printk(" maxag: %d\n", bmp->db_maxag);
	printk(" agpref: %d\n", bmp->db_agpref);
	printk(" l2nbppg: %d\n", bmp->db_l2nbperpage);
}


/*
 * dbPrtCtl()
 */
static void dbPrtCtl(struct dmapctl * dcp)
{
	int i, j, n;

	printk(" height: %08x\n", le32_to_cpu(dcp->height));
	printk(" leafidx: %08x\n", le32_to_cpu(dcp->leafidx));
	printk(" budmin: %08x\n", dcp->budmin);
	printk(" nleafs: %08x\n", le32_to_cpu(dcp->nleafs));
	printk(" l2nleafs: %08x\n", le32_to_cpu(dcp->l2nleafs));

	printk("\n Tree:\n");
	for (i = 0; i < CTLLEAFIND; i += 8) {
		n = min(8, CTLLEAFIND - i);

		for (j = 0; j < n; j++)
			printf(" [%03x]: %02x", i + j,
			       (char) dcp->stree[i + j]);
		printf("\n");
	}

	printk("\n Tree Leaves:\n");
	for (i = 0; i < LPERCTL; i += 8) {
		n = min(8, LPERCTL - i);

		for (j = 0; j < n; j++)
			printf(" [%03x]: %02x",
			       i + j,
			       (char) dcp->stree[i + j + CTLLEAFIND]);
		printf("\n");
	}
}
#endif /* _JFS_DEBUG_DMAP */
@@ -4554,202 +4554,3 @@ int dtModify(tid_t tid, struct inode *ip,
	return 0;
}

#ifdef _JFS_DEBUG_DTREE
/*
 * dtDisplayTree()
 *
 * function: traverse forward
 */
int dtDisplayTree(struct inode *ip)
{
	int rc;
	struct metapage *mp;
	dtpage_t *p;
	s64 bn, pbn;
	int index, lastindex, v, h;
	pxd_t *xd;
	struct btstack btstack;
	struct btframe *btsp;
	struct btframe *parent;
	u8 *stbl;
	int psize = 256;

	printk("display B+-tree.\n");

	/* clear stack */
	btsp = btstack.stack;

	/*
	 * start with root
	 *
	 * root resides in the inode
	 */
	bn = 0;
	v = h = 0;

	/*
	 * first access of each page:
	 */
      newPage:
	DT_GETPAGE(ip, bn, mp, psize, p, rc);
	if (rc)
		return rc;

	/* process entries forward from first index */
	index = 0;
	lastindex = p->header.nextindex - 1;

	if (p->header.flag & BT_INTERNAL) {
		/*
		 * first access of each internal page
		 */
		printf("internal page ");
		dtDisplayPage(ip, bn, p);

		goto getChild;
	} else {	/* (p->header.flag & BT_LEAF) */

		/*
		 * first access of each leaf page
		 */
		printf("leaf page ");
		dtDisplayPage(ip, bn, p);

		/*
		 * process leaf page entries
		 *
		 for ( ; index <= lastindex; index++)
		 {
		 }
		 */

		/* unpin the leaf page */
		DT_PUTPAGE(mp);
	}

	/*
	 * go back up to the parent page
	 */
      getParent:
	/* pop/restore parent entry for the current child page */
	if ((parent = (btsp == btstack.stack ? NULL : --btsp)) == NULL)
		/* current page must have been root */
		return;

	/*
	 * parent page scan completed
	 */
	if ((index = parent->index) == (lastindex = parent->lastindex)) {
		/* go back up to the parent page */
		goto getParent;
	}

	/*
	 * parent page has entries remaining
	 */
	/* get back the parent page */
	bn = parent->bn;
	/* v = parent->level; */
	DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
	if (rc)
		return rc;

	/* get next parent entry */
	index++;

	/*
	 * internal page: go down to child page of current entry
	 */
      getChild:
	/* push/save current parent entry for the child page */
	btsp->bn = pbn = bn;
	btsp->index = index;
	btsp->lastindex = lastindex;
	/* btsp->level = v; */
	/* btsp->node = h; */
	++btsp;

	/* get current entry for the child page */
	stbl = DT_GETSTBL(p);
	xd = (pxd_t *) & p->slot[stbl[index]];

	/*
	 * first access of each internal entry:
	 */

	/* get child page */
	bn = addressPXD(xd);
	psize = lengthPXD(xd) << ip->i_ipmnt->i_l2bsize;

	printk("traverse down 0x%Lx[%d]->0x%Lx\n", pbn, index, bn);
	v++;
	h = index;

	/* release parent page */
	DT_PUTPAGE(mp);

	/* process the child page */
	goto newPage;
}


/*
 * dtDisplayPage()
 *
 * function: display page
 */
int dtDisplayPage(struct inode *ip, s64 bn, dtpage_t * p)
{
	int rc;
	struct metapage *mp;
	struct ldtentry *lh;
	struct idtentry *ih;
	pxd_t *xd;
	int i, j;
	u8 *stbl;
	wchar_t name[JFS_NAME_MAX + 1];
	struct component_name key = { 0, name };
	int freepage = 0;

	if (p == NULL) {
		freepage = 1;
		DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
		if (rc)
			return rc;
	}

	/* display page control */
	printk("bn:0x%Lx flag:0x%08x nextindex:%d\n",
	       bn, p->header.flag, p->header.nextindex);

	/* display entries */
	stbl = DT_GETSTBL(p);
	for (i = 0, j = 1; i < p->header.nextindex; i++, j++) {
		dtGetKey(p, i, &key, JFS_SBI(ip->i_sb)->mntflag);
		key.name[key.namlen] = '\0';
		if (p->header.flag & BT_LEAF) {
			lh = (struct ldtentry *) & p->slot[stbl[i]];
			printf("\t[%d] %s:%d", i, key.name,
			       le32_to_cpu(lh->inumber));
		} else {
			ih = (struct idtentry *) & p->slot[stbl[i]];
			xd = (pxd_t *) ih;
			bn = addressPXD(xd);
			printf("\t[%d] %s:0x%Lx", i, key.name, bn);
		}

		if (j == 4) {
			printf("\n");
			j = 0;
		}
	}

	printf("\n");

	if (freepage)
		DT_PUTPAGE(mp);

	return 0;
}
#endif /* _JFS_DEBUG_DTREE */
@@ -269,11 +269,4 @@ extern int dtModify(tid_t tid, struct inode *ip, struct component_name * key,
		    ino_t * orig_ino, ino_t new_ino, int flag);

extern int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir);

#ifdef _JFS_DEBUG_DTREE
extern int dtDisplayTree(struct inode *ip);

extern int dtDisplayPage(struct inode *ip, s64 bn, dtpage_t * p);
#endif /* _JFS_DEBUG_DTREE */

#endif /* !_H_JFS_DTREE */
@@ -86,25 +86,6 @@ static int diIAGRead(struct inomap * imap, int, struct metapage **);
static int copy_from_dinode(struct dinode *, struct inode *);
static void copy_to_dinode(struct dinode *, struct inode *);

/*
 * debug code for double-checking inode map
 */
/* #define _JFS_DEBUG_IMAP 1 */

#ifdef _JFS_DEBUG_IMAP
#define DBG_DIINIT(imap) DBGdiInit(imap)
#define DBG_DIALLOC(imap, ino) DBGdiAlloc(imap, ino)
#define DBG_DIFREE(imap, ino) DBGdiFree(imap, ino)

static void *DBGdiInit(struct inomap * imap);
static void DBGdiAlloc(struct inomap * imap, ino_t ino);
static void DBGdiFree(struct inomap * imap, ino_t ino);
#else
#define DBG_DIINIT(imap)
#define DBG_DIALLOC(imap, ino)
#define DBG_DIFREE(imap, ino)
#endif /* _JFS_DEBUG_IMAP */

/*
 * NAME: diMount()
 *
@@ -188,8 +169,6 @@ int diMount(struct inode *ipimap)
	imap->im_ipimap = ipimap;
	JFS_IP(ipimap)->i_imap = imap;

//	DBG_DIINIT(imap);

	return (0);
}

@@ -1043,7 +1022,6 @@ int diFree(struct inode *ip)
	/* update the bitmap.
	 */
	iagp->wmap[extno] = cpu_to_le32(bitmap);
	DBG_DIFREE(imap, inum);

	/* update the free inode counts at the iag, ag and
	 * map level.
@@ -1231,7 +1209,6 @@ int diFree(struct inode *ip)
		jfs_error(ip->i_sb, "diFree: the pmap does not show inode free");
	}
	iagp->wmap[extno] = 0;
	DBG_DIFREE(imap, inum);
	PXDlength(&iagp->inoext[extno], 0);
	PXDaddress(&iagp->inoext[extno], 0);

@@ -1350,7 +1327,6 @@ diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp)
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);

	ip->i_ino = (iagno << L2INOSPERIAG) + ino;
	DBG_DIALLOC(JFS_IP(ipimap)->i_imap, ip->i_ino);
	jfs_ip->ixpxd = iagp->inoext[extno];
	jfs_ip->agno = BLKTOAG(le64_to_cpu(iagp->agstart), sbi);
	jfs_ip->active_ag = -1;
@@ -3185,84 +3161,3 @@ static void copy_to_dinode(struct dinode * dip, struct inode *ip)
	if (S_ISCHR(ip->i_mode) || S_ISBLK(ip->i_mode))
		dip->di_rdev = cpu_to_le32(jfs_ip->dev);
}

#ifdef _JFS_DEBUG_IMAP
/*
 * DBGdiInit()
 */
static void *DBGdiInit(struct inomap * imap)
{
	u32 *dimap;
	int size;
	size = 64 * 1024;
	if ((dimap = (u32 *) xmalloc(size, L2PSIZE, kernel_heap)) == NULL)
		assert(0);
	bzero((void *) dimap, size);
	imap->im_DBGdimap = dimap;
}

/*
 * DBGdiAlloc()
 */
static void DBGdiAlloc(struct inomap * imap, ino_t ino)
{
	u32 *dimap = imap->im_DBGdimap;
	int w, b;
	u32 m;
	w = ino >> 5;
	b = ino & 31;
	m = 0x80000000 >> b;
	assert(w < 64 * 256);
	if (dimap[w] & m) {
		printk("DEBUG diAlloc: duplicate alloc ino:0x%x\n", ino);
	}
	dimap[w] |= m;
}

/*
 * DBGdiFree()
 */
static void DBGdiFree(struct inomap * imap, ino_t ino)
{
	u32 *dimap = imap->im_DBGdimap;
	int w, b;
	u32 m;
	w = ino >> 5;
	b = ino & 31;
	m = 0x80000000 >> b;
	assert(w < 64 * 256);
	if ((dimap[w] & m) == 0) {
		printk("DEBUG diFree: duplicate free ino:0x%x\n", ino);
	}
	dimap[w] &= ~m;
}

static void dump_cp(struct inomap * ipimap, char *function, int line)
{
	printk("\n* ********* *\nControl Page %s %d\n", function, line);
	printk("FreeIAG %d\tNextIAG %d\n", ipimap->im_freeiag,
	       ipimap->im_nextiag);
	printk("NumInos %d\tNumFree %d\n",
	       atomic_read(&ipimap->im_numinos),
	       atomic_read(&ipimap->im_numfree));
	printk("AG InoFree %d\tAG ExtFree %d\n",
	       ipimap->im_agctl[0].inofree, ipimap->im_agctl[0].extfree);
	printk("AG NumInos %d\tAG NumFree %d\n",
	       ipimap->im_agctl[0].numinos, ipimap->im_agctl[0].numfree);
}

static void dump_iag(struct iag * iag, char *function, int line)
{
	printk("\n* ********* *\nIAG %s %d\n", function, line);
	printk("IagNum %d\tIAG Free %d\n", le32_to_cpu(iag->iagnum),
	       le32_to_cpu(iag->iagfree));
	printk("InoFreeFwd %d\tInoFreeBack %d\n",
	       le32_to_cpu(iag->inofreefwd),
	       le32_to_cpu(iag->inofreeback));
	printk("ExtFreeFwd %d\tExtFreeBack %d\n",
	       le32_to_cpu(iag->extfreefwd),
	       le32_to_cpu(iag->extfreeback));
	printk("NFreeInos %d\tNFreeExts %d\n", le32_to_cpu(iag->nfreeinos),
	       le32_to_cpu(iag->nfreeexts));
}
#endif /* _JFS_DEBUG_IMAP */
@@ -135,14 +135,6 @@ static int xtSearchNode(struct inode *ip,
static int xtRelink(tid_t tid, struct inode *ip, xtpage_t * fp);
#endif /* _STILL_TO_PORT */

/* External references */

/*
 * debug control
 */
/* #define _JFS_DEBUG_XTREE 1 */


/*
 * xtLookup()
 *
@@ -4140,338 +4132,6 @@ s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size)
	return 0;
}


#ifdef _JFS_DEBUG_XTREE
/*
 * xtDisplayTree()
 *
 * function: traverse forward
 */
int xtDisplayTree(struct inode *ip)
{
	int rc = 0;
	struct metapage *mp;
	xtpage_t *p;
	s64 bn, pbn;
	int index, lastindex, v, h;
	xad_t *xad;
	struct btstack btstack;
	struct btframe *btsp;
	struct btframe *parent;

	printk("display B+-tree.\n");

	/* clear stack */
	btsp = btstack.stack;

	/*
	 * start with root
	 *
	 * root resides in the inode
	 */
	bn = 0;
	v = h = 0;

	/*
	 * first access of each page:
	 */
      getPage:
	XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
	if (rc)
		return rc;

	/* process entries forward from first index */
	index = XTENTRYSTART;
	lastindex = le16_to_cpu(p->header.nextindex) - 1;

	if (p->header.flag & BT_INTERNAL) {
		/*
		 * first access of each internal page
		 */
		goto getChild;
	} else {	/* (p->header.flag & BT_LEAF) */

		/*
		 * first access of each leaf page
		 */
		printf("leaf page ");
		xtDisplayPage(ip, bn, p);

		/* unpin the leaf page */
		XT_PUTPAGE(mp);
	}

	/*
	 * go back up to the parent page
	 */
      getParent:
	/* pop/restore parent entry for the current child page */
	if ((parent = (btsp == btstack.stack ? NULL : --btsp)) == NULL)
		/* current page must have been root */
		return;

	/*
	 * parent page scan completed
	 */
	if ((index = parent->index) == (lastindex = parent->lastindex)) {
		/* go back up to the parent page */
		goto getParent;
	}

	/*
	 * parent page has entries remaining
	 */
	/* get back the parent page */
	bn = parent->bn;
	/* v = parent->level; */
	XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
	if (rc)
		return rc;

	/* get next parent entry */
	index++;

	/*
	 * internal page: go down to child page of current entry
	 */
      getChild:
	/* push/save current parent entry for the child page */
	btsp->bn = pbn = bn;
	btsp->index = index;
	btsp->lastindex = lastindex;
	/* btsp->level = v; */
	/* btsp->node = h; */
	++btsp;

	/* get child page */
	xad = &p->xad[index];
	bn = addressXAD(xad);

	/*
	 * first access of each internal entry:
	 */
	/* release parent page */
	XT_PUTPAGE(mp);

	printk("traverse down 0x%lx[%d]->0x%lx\n", (ulong) pbn, index,
	       (ulong) bn);
	v++;
	h = index;

	/* process the child page */
	goto getPage;
}


/*
 * xtDisplayPage()
 *
 * function: display page
 */
int xtDisplayPage(struct inode *ip, s64 bn, xtpage_t * p)
{
	int rc = 0;
	xad_t *xad;
	s64 xaddr, xoff;
	int xlen, i, j;

	/* display page control */
	printf("bn:0x%lx flag:0x%x nextindex:%d\n",
	       (ulong) bn, p->header.flag,
	       le16_to_cpu(p->header.nextindex));

	/* display entries */
	xad = &p->xad[XTENTRYSTART];
	for (i = XTENTRYSTART, j = 1; i < le16_to_cpu(p->header.nextindex);
	     i++, xad++, j++) {
		xoff = offsetXAD(xad);
		xaddr = addressXAD(xad);
		xlen = lengthXAD(xad);
		printf("\t[%d] 0x%lx:0x%lx(0x%x)", i, (ulong) xoff,
		       (ulong) xaddr, xlen);

		if (j == 4) {
			printf("\n");
			j = 0;
		}
	}

	printf("\n");
}
#endif /* _JFS_DEBUG_XTREE */


#ifdef _JFS_WIP
/*
 * xtGather()
 *
 * function:
 * traverse for allocation acquiring tlock at commit time
 * (vs at the time of update) logging backward top down
 *
 * note:
 * problem - establishing that all new allocation have been
 * processed both for append and random write in sparse file
 * at the current entry at the current subtree root page
 *
 */
int xtGather(btree_t *t)
{
	int rc = 0;
	xtpage_t *p;
	u64 bn;
	int index;
	btentry_t *e;
	struct btstack btstack;
	struct btsf *parent;

	/* clear stack */
	BT_CLR(&btstack);

	/*
	 * start with root
	 *
	 * root resides in the inode
	 */
	bn = 0;
	XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
	if (rc)
		return rc;

	/* new root is NOT pointed by a new entry
	   if (p->header.flag & NEW)
	   allocate new page lock;
	   write a NEWPAGE log;
	 */

      dopage:
	/*
	 * first access of each page:
	 */
	/* process entries backward from last index */
	index = le16_to_cpu(p->header.nextindex) - 1;

	if (p->header.flag & BT_LEAF) {
		/*
		 * first access of each leaf page
		 */
		/* process leaf page entries backward */
		for (; index >= XTENTRYSTART; index--) {
			e = &p->xad[index];
			/*
			 * if newpage, log NEWPAGE.
			 *
			 if (e->flag & XAD_NEW) {
			 nfound =+ entry->length;
			 update current page lock for the entry;
			 newpage(entry);
			 *
			 * if moved, log move.
			 *
			 } else if (e->flag & XAD_MOVED) {
			 reset flag;
			 update current page lock for the entry;
			 }
			 */
		}

		/* unpin the leaf page */
		XT_PUTPAGE(mp);

		/*
		 * go back up to the parent page
		 */
	      getParent:
		/* restore parent entry for the current child page */
		if ((parent = BT_POP(&btstack)) == NULL)
			/* current page must have been root */
			return 0;

		if ((index = parent->index) == XTENTRYSTART) {
			/*
			 * parent page scan completed
			 */
			/* go back up to the parent page */
			goto getParent;
		} else {
			/*
			 * parent page has entries remaining
			 */
			/* get back the parent page */
			bn = parent->bn;
			XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
			if (rc)
				return -EIO;

			/* first subroot page which
			 * covers all new allocated blocks
			 * itself not new/modified.
			 * (if modified from split of descendent,
			 * go down path of split page)

			 if (nfound == nnew &&
			 !(p->header.flag & (NEW | MOD)))
			 exit scan;
			 */

			/* process parent page entries backward */
			index--;
		}
	} else {
		/*
		 * first access of each internal page
		 */
	}

	/*
	 * internal page: go down to child page of current entry
	 */

	/* save current parent entry for the child page */
	BT_PUSH(&btstack, bn, index);

	/* get current entry for the child page */
	e = &p->xad[index];

	/*
	 * first access of each internal entry:
	 */
	/*
	 * if new entry, log btree_tnewentry.
	 *
	 if (e->flag & XAD_NEW)
	 update parent page lock for the entry;
	 */

	/* release parent page */
	XT_PUTPAGE(mp);

	/* get child page */
	bn = e->bn;
	XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
	if (rc)
		return rc;

	/*
	 * first access of each non-root page:
	 */
	/*
	 * if new, log btree_newpage.
	 *
	 if (p->header.flag & NEW)
	 allocate new page lock;
	 write a NEWPAGE log (next, prev);
	 */

	/* process the child page */
	goto dopage;

      out:
	return 0;
}
#endif /* _JFS_WIP */


#ifdef CONFIG_JFS_STATISTICS
int jfs_xtstat_read(char *buffer, char **start, off_t offset, int length,
		    int *eof, void *data)
@@ -131,10 +131,4 @@ extern int xtRelocate(tid_t tid, struct inode *ip,
extern int xtAppend(tid_t tid,
		    struct inode *ip, int xflag, s64 xoff, int maxblocks,
		    int *xlenp, s64 * xaddrp, int flag);

#ifdef _JFS_DEBUG_XTREE
extern int xtDisplayTree(struct inode *ip);
extern int xtDisplayPage(struct inode *ip, s64 bn, xtpage_t * p);
#endif /* _JFS_DEBUG_XTREE */

#endif /* !_H_JFS_XTREE */