bcache: Convert debug code to btree_keys
More work to disentangle various code from struct btree.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
commit dc9d98d621 (parent c052dd9a26)
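The conversion is mechanical throughout: debug helpers that previously took a struct btree now take the embedded struct btree_keys, so call sites pass &b->keys. A minimal before/after sketch of the call-site change (lines taken from the hunks below):

    /* before: helpers took the whole btree node */
    bch_check_keys(b, "writing");
    oldsize = bch_count_data(b);

    /* after: they take only the key container */
    bch_check_keys(&b->keys, "writing");
    oldsize = bch_count_data(&b->keys);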
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -7,11 +7,121 @@
 #include "bcache.h"
 #include "btree.h"
 #include "debug.h"
 
+#include <linux/console.h>
 #include <linux/random.h>
 #include <linux/prefetch.h>
 
+#ifdef CONFIG_BCACHE_DEBUG
+
+void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
+{
+	struct bkey *k, *next;
+
+	for (k = i->start; k < bset_bkey_last(i); k = next) {
+		next = bkey_next(k);
+
+		printk(KERN_ERR "block %u key %zi/%u: ", set,
+		       (uint64_t *) k - i->d, i->keys);
+
+		if (b->ops->key_dump)
+			b->ops->key_dump(b, k);
+		else
+			printk("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
+
+		if (next < bset_bkey_last(i) &&
+		    bkey_cmp(k, b->ops->is_extents ?
+			     &START_KEY(next) : next) > 0)
+			printk(KERN_ERR "Key skipped backwards\n");
+	}
+}
+
+void bch_dump_bucket(struct btree_keys *b)
+{
+	unsigned i;
+
+	console_lock();
+	for (i = 0; i <= b->nsets; i++)
+		bch_dump_bset(b, b->set[i].data,
+			      bset_sector_offset(b, b->set[i].data));
+	console_unlock();
+}
+
+int __bch_count_data(struct btree_keys *b)
+{
+	unsigned ret = 0;
+	struct btree_iter iter;
+	struct bkey *k;
+
+	if (b->ops->is_extents)
+		for_each_key(b, k, &iter)
+			ret += KEY_SIZE(k);
+	return ret;
+}
+
+void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
+{
+	va_list args;
+	struct bkey *k, *p = NULL;
+	struct btree_iter iter;
+	const char *err;
+
+	for_each_key(b, k, &iter) {
+		if (b->ops->is_extents) {
+			err = "Keys out of order";
+			if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
+				goto bug;
+
+			if (bch_ptr_invalid(b, k))
+				continue;
+
+			err = "Overlapping keys";
+			if (p && bkey_cmp(p, &START_KEY(k)) > 0)
+				goto bug;
+		} else {
+			if (bch_ptr_bad(b, k))
+				continue;
+
+			err = "Duplicate keys";
+			if (p && !bkey_cmp(p, k))
+				goto bug;
+		}
+		p = k;
+	}
+#if 0
+	err = "Key larger than btree node key";
+	if (p && bkey_cmp(p, &b->key) > 0)
+		goto bug;
+#endif
+	return;
+bug:
+	bch_dump_bucket(b);
+
+	va_start(args, fmt);
+	vprintk(fmt, args);
+	va_end(args);
+
+	panic("bch_check_keys error: %s:\n", err);
+}
+
+static void bch_btree_iter_next_check(struct btree_iter *iter)
+{
+	struct bkey *k = iter->data->k, *next = bkey_next(k);
+
+	if (next < iter->data->end &&
+	    bkey_cmp(k, iter->b->ops->is_extents ?
+		     &START_KEY(next) : next) > 0) {
+		bch_dump_bucket(iter->b);
+		panic("Key skipped backwards\n");
+	}
+}
+
+#else
+
+static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
+
+#endif
+
 /* Keylists */
 
 int __bch_keylist_realloc(struct keylist *l, unsigned u64s)
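bch_dump_bset() is now format-agnostic: it dispatches to ops->key_dump when the key type supplies one and falls back to a bare inode:offset print otherwise. As a sketch, a hypothetical key format wanting the default plus a length could provide a hook like the following (illustrative only; the hook this commit actually wires up is bch_bkey_dump() in extents.c, further down):

    /* hypothetical key_dump hook; the KEY_* macros are bcache's own */
    static void example_key_dump(struct btree_keys *b, const struct bkey *k)
    {
            printk("%llu:%llu len %llu\n",
                   KEY_INODE(k), KEY_OFFSET(k), KEY_SIZE(k));
    }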
@@ -1045,7 +1155,7 @@ void bch_btree_sort_partial(struct btree *b, unsigned start,
 {
 	size_t order = b->keys.page_order, keys = 0;
 	struct btree_iter iter;
-	int oldsize = bch_count_data(b);
+	int oldsize = bch_count_data(&b->keys);
 
 	__bch_btree_iter_init(&b->keys, &iter, NULL, &b->keys.set[start]);
 
@@ -1063,7 +1173,8 @@ void bch_btree_sort_partial(struct btree *b, unsigned start,
 
 	__btree_sort(&b->keys, &iter, start, order, false, state);
 
-	EBUG_ON(b->written && oldsize >= 0 && bch_count_data(b) != oldsize);
+	EBUG_ON(b->written && oldsize >= 0 &&
+		bch_count_data(&b->keys) != oldsize);
 }
 EXPORT_SYMBOL(bch_btree_sort_partial);
 
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -193,6 +193,8 @@ struct btree_keys_ops {
 	bool		(*key_bad)(struct btree_keys *, const struct bkey *);
 	bool		(*key_merge)(struct btree_keys *,
 				     struct bkey *, struct bkey *);
+	void		(*key_to_text)(char *, size_t, const struct bkey *);
+	void		(*key_dump)(struct btree_keys *, const struct bkey *);
 
 	/*
 	 * Only used for deciding whether to use START_KEY(k) or just the key
@@ -243,15 +245,6 @@ static inline unsigned bset_sector_offset(struct btree_keys *b, struct bset *i)
 	return bset_byte_offset(b, i) >> 9;
 }
 
-static inline bool btree_keys_expensive_checks(struct btree_keys *b)
-{
-#ifdef CONFIG_BCACHE_DEBUG
-	return *b->expensive_debug_checks;
-#else
-	return false;
-#endif
-}
-
 #define __set_bytes(i, k)	(sizeof(*(i)) + (k) * sizeof(uint64_t))
 #define set_bytes(i)		__set_bytes(i, i->keys)
 
@@ -446,6 +439,12 @@ static inline bool bch_ptr_bad(struct btree_keys *b, const struct bkey *k)
 	return b->ops->key_bad(b, k);
 }
 
+static inline void bch_bkey_to_text(struct btree_keys *b, char *buf,
+				    size_t size, const struct bkey *k)
+{
+	return b->ops->key_to_text(buf, size, k);
+}
+
 /* Keylists */
 
 struct keylist {
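bch_bkey_to_text() is now a thin dispatcher through ops->key_to_text, replacing the extent-specific function of the same name that debug.h used to declare (renamed bch_extent_to_text below). Usage sketch, assuming b->ops has been set up:

    char buf[80];

    bch_bkey_to_text(b, buf, sizeof(buf), k);  /* calls b->ops->key_to_text() */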
@@ -509,7 +508,42 @@ struct bkey *bch_keylist_pop(struct keylist *);
 void bch_keylist_pop_front(struct keylist *);
 int __bch_keylist_realloc(struct keylist *, unsigned);
 
-struct cache_set;
-const char *bch_ptr_status(struct cache_set *, const struct bkey *);
+/* Debug stuff */
+
+#ifdef CONFIG_BCACHE_DEBUG
+
+int __bch_count_data(struct btree_keys *);
+void __bch_check_keys(struct btree_keys *, const char *, ...);
+void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
+void bch_dump_bucket(struct btree_keys *);
+
+#else
+
+static inline int __bch_count_data(struct btree_keys *b) { return -1; }
+static inline void __bch_check_keys(struct btree_keys *b, const char *fmt, ...) {}
+static inline void bch_dump_bucket(struct btree_keys *b) {}
+void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
+
+#endif
+
+static inline bool btree_keys_expensive_checks(struct btree_keys *b)
+{
+#ifdef CONFIG_BCACHE_DEBUG
+	return *b->expensive_debug_checks;
+#else
+	return false;
+#endif
+}
+
+static inline int bch_count_data(struct btree_keys *b)
+{
+	return btree_keys_expensive_checks(b) ? __bch_count_data(b) : -1;
+}
+
+#define bch_check_keys(b, ...)						\
+do {									\
+	if (btree_keys_expensive_checks(b))				\
+		__bch_check_keys(b, __VA_ARGS__);			\
+} while (0)
+
 #endif
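These wrappers keep the expensive verification gated behind the cache set's expensive_debug_checks flag, now reached through the pointer stored in struct btree_keys, so production builds pay only a flag test. The typical pattern, as the btree.c hunks below show:

    int oldsize = bch_count_data(&b->keys);  /* -1 when checks are off */

    /* ... insert keys into the node ... */

    bch_check_keys(&b->keys, "%u for %s", status,
                   replace_key ? "replace" : "insert");
    BUG_ON(bch_count_data(&b->keys) < oldsize);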
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -460,7 +460,7 @@ void bch_btree_node_write(struct btree *b, struct closure *parent)
 	BUG_ON(b->written >= btree_blocks(b));
 	BUG_ON(b->written && !i->keys);
 	BUG_ON(btree_bset_first(b)->seq != i->seq);
-	bch_check_keys(b, "writing");
+	bch_check_keys(&b->keys, "writing");
 
 	cancel_delayed_work(&b->work);
 
@@ -2007,7 +2007,7 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
 insert:	bch_bset_insert(&b->keys, m, k);
 copy:	bkey_copy(m, k);
 merged:
-	bch_check_keys(b, "%u for %s", status,
+	bch_check_keys(&b->keys, "%u for %s", status,
 		       replace_key ? "replace" : "insert");
 
 	if (b->level && !KEY_OFFSET(k))
@@ -2036,7 +2036,7 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
 				  struct bkey *replace_key)
 {
 	bool ret = false;
-	int oldsize = bch_count_data(b);
+	int oldsize = bch_count_data(&b->keys);
 
 	while (!bch_keylist_empty(insert_keys)) {
 		struct bkey *k = insert_keys->keys;
@@ -2066,7 +2066,7 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
 
 	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
 
-	BUG_ON(bch_count_data(b) < oldsize);
+	BUG_ON(bch_count_data(&b->keys) < oldsize);
 	return ret;
 }
 
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -8,6 +8,7 @@
 #include "bcache.h"
 #include "btree.h"
 #include "debug.h"
+#include "extents.h"
 
 #include <linux/console.h>
 #include <linux/debugfs.h>
@@ -17,108 +18,8 @@
 
 static struct dentry *debug;
 
-const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
-{
-	unsigned i;
-
-	for (i = 0; i < KEY_PTRS(k); i++)
-		if (ptr_available(c, k, i)) {
-			struct cache *ca = PTR_CACHE(c, k, i);
-			size_t bucket = PTR_BUCKET_NR(c, k, i);
-			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
-
-			if (KEY_SIZE(k) + r > c->sb.bucket_size)
-				return "bad, length too big";
-			if (bucket < ca->sb.first_bucket)
-				return "bad, short offset";
-			if (bucket >= ca->sb.nbuckets)
-				return "bad, offset past end of device";
-			if (ptr_stale(c, k, i))
-				return "stale";
-		}
-
-	if (!bkey_cmp(k, &ZERO_KEY))
-		return "bad, null key";
-	if (!KEY_PTRS(k))
-		return "bad, no pointers";
-	if (!KEY_SIZE(k))
-		return "zeroed key";
-	return "";
-}
-
-int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k)
-{
-	unsigned i = 0;
-	char *out = buf, *end = buf + size;
-
-#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__))
-
-	p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k));
-
-	for (i = 0; i < KEY_PTRS(k); i++) {
-		if (i)
-			p(", ");
-
-		if (PTR_DEV(k, i) == PTR_CHECK_DEV)
-			p("check dev");
-		else
-			p("%llu:%llu gen %llu", PTR_DEV(k, i),
-			  PTR_OFFSET(k, i), PTR_GEN(k, i));
-	}
-
-	p("]");
-
-	if (KEY_DIRTY(k))
-		p(" dirty");
-	if (KEY_CSUM(k))
-		p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
-#undef p
-	return out - buf;
-}
-
 #ifdef CONFIG_BCACHE_DEBUG
 
-static void dump_bset(struct btree *b, struct bset *i, unsigned set)
-{
-	struct bkey *k, *next;
-	unsigned j;
-	char buf[80];
-
-	for (k = i->start; k < bset_bkey_last(i); k = next) {
-		next = bkey_next(k);
-
-		bch_bkey_to_text(buf, sizeof(buf), k);
-		printk(KERN_ERR "b %u k %zi/%u: %s", set,
-		       (uint64_t *) k - i->d, i->keys, buf);
-
-		for (j = 0; j < KEY_PTRS(k); j++) {
-			size_t n = PTR_BUCKET_NR(b->c, k, j);
-			printk(" bucket %zu", n);
-
-			if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
-				printk(" prio %i",
-				       PTR_BUCKET(b->c, k, j)->prio);
-		}
-
-		printk(" %s\n", bch_ptr_status(b->c, k));
-
-		if (next < bset_bkey_last(i) &&
-		    bkey_cmp(k, !b->level ? &START_KEY(next) : next) > 0)
-			printk(KERN_ERR "Key skipped backwards\n");
-	}
-}
-
-static void bch_dump_bucket(struct btree *b)
-{
-	unsigned i;
-
-	console_lock();
-	for (i = 0; i <= b->keys.nsets; i++)
-		dump_bset(b, b->keys.set[i].data,
-			  bset_block_offset(b, b->keys.set[i].data));
-	console_unlock();
-}
-
 #define for_each_written_bset(b, start, i)				\
 	for (i = (start);						\
 	     (void *) i < (void *) (start) + (KEY_SIZE(&b->key) << 9) &&\
@@ -171,17 +72,17 @@ void bch_btree_verify(struct btree *b)
 		console_lock();
 
 		printk(KERN_ERR "*** in memory:\n");
-		dump_bset(b, inmemory, 0);
+		bch_dump_bset(&b->keys, inmemory, 0);
 
 		printk(KERN_ERR "*** read back in:\n");
-		dump_bset(v, sorted, 0);
+		bch_dump_bset(&v->keys, sorted, 0);
 
 		for_each_written_bset(b, ondisk, i) {
 			unsigned block = ((void *) i - (void *) ondisk) /
 				block_bytes(b->c);
 
 			printk(KERN_ERR "*** on disk block %u:\n", block);
-			dump_bset(b, i, block);
+			bch_dump_bset(&b->keys, i, block);
 		}
 
 		printk(KERN_ERR "*** block %zu not written\n",
@@ -239,76 +140,6 @@ out_put:
 	bio_put(check);
 }
 
-int __bch_count_data(struct btree *b)
-{
-	unsigned ret = 0;
-	struct btree_iter iter;
-	struct bkey *k;
-
-	if (!b->level)
-		for_each_key(&b->keys, k, &iter)
-			ret += KEY_SIZE(k);
-	return ret;
-}
-
-void __bch_check_keys(struct btree *b, const char *fmt, ...)
-{
-	va_list args;
-	struct bkey *k, *p = NULL;
-	struct btree_iter iter;
-	const char *err;
-
-	for_each_key(&b->keys, k, &iter) {
-		if (!b->level) {
-			err = "Keys out of order";
-			if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
-				goto bug;
-
-			if (bch_ptr_invalid(&b->keys, k))
-				continue;
-
-			err = "Overlapping keys";
-			if (p && bkey_cmp(p, &START_KEY(k)) > 0)
-				goto bug;
-		} else {
-			if (bch_ptr_bad(&b->keys, k))
-				continue;
-
-			err = "Duplicate keys";
-			if (p && !bkey_cmp(p, k))
-				goto bug;
-		}
-		p = k;
-	}
-
-	err = "Key larger than btree node key";
-	if (p && bkey_cmp(p, &b->key) > 0)
-		goto bug;
-
-	return;
-bug:
-	bch_dump_bucket(b);
-
-	va_start(args, fmt);
-	vprintk(fmt, args);
-	va_end(args);
-
-	panic("bcache error: %s:\n", err);
-}
-
-void bch_btree_iter_next_check(struct btree_iter *iter)
-{
-#if 0
-	struct bkey *k = iter->data->k, *next = bkey_next(k);
-
-	if (next < iter->data->end &&
-	    bkey_cmp(k, iter->b->level ? next : &START_KEY(next)) > 0) {
-		bch_dump_bucket(iter->b);
-		panic("Key skipped backwards\n");
-	}
-#endif
-}
-
 #endif
 
 #ifdef CONFIG_DEBUG_FS
@@ -355,7 +186,7 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf,
 		if (!w)
 			break;
 
-		bch_bkey_to_text(kbuf, sizeof(kbuf), &w->key);
+		bch_extent_to_text(kbuf, sizeof(kbuf), &w->key);
 		i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);
 		bch_keybuf_del(&i->keys, w);
 	}
--- a/drivers/md/bcache/debug.h
+++ b/drivers/md/bcache/debug.h
@@ -1,19 +1,15 @@
 #ifndef _BCACHE_DEBUG_H
 #define _BCACHE_DEBUG_H
 
-/* Btree/bkey debug printing */
-
-int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k);
+struct bio;
+struct cached_dev;
+struct cache_set;
 
 #ifdef CONFIG_BCACHE_DEBUG
 
 void bch_btree_verify(struct btree *);
 void bch_data_verify(struct cached_dev *, struct bio *);
-int __bch_count_data(struct btree *);
-void __bch_check_keys(struct btree *, const char *, ...);
-void bch_btree_iter_next_check(struct btree_iter *);
 
-#define EBUG_ON(cond)			BUG_ON(cond)
 #define expensive_debug_checks(c)	((c)->expensive_debug_checks)
 #define key_merging_disabled(c)		((c)->key_merging_disabled)
 #define bypass_torture_test(d)		((d)->bypass_torture_test)
@@ -22,26 +18,13 @@ void bch_btree_iter_next_check(struct btree_iter *);
 
 static inline void bch_btree_verify(struct btree *b) {}
 static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}
-static inline int __bch_count_data(struct btree *b) { return -1; }
-static inline void __bch_check_keys(struct btree *b, const char *fmt, ...) {}
-static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
 
-#define EBUG_ON(cond)			do { if (cond); } while (0)
 #define expensive_debug_checks(c)	0
 #define key_merging_disabled(c)		0
 #define bypass_torture_test(d)		0
 
 #endif
 
-#define bch_count_data(b)						\
-	(expensive_debug_checks((b)->c) ? __bch_count_data(b) : -1)
-
-#define bch_check_keys(b, ...)						\
-do {									\
-	if (expensive_debug_checks((b)->c))				\
-		__bch_check_keys(b, __VA_ARGS__);			\
-} while (0)
-
 #ifdef CONFIG_DEBUG_FS
 void bch_debug_init_cache_set(struct cache_set *);
 #else
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -62,6 +62,87 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
 	return false;
 }
 
+/* Common among btree and extent ptrs */
+
+static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
+{
+	unsigned i;
+
+	for (i = 0; i < KEY_PTRS(k); i++)
+		if (ptr_available(c, k, i)) {
+			struct cache *ca = PTR_CACHE(c, k, i);
+			size_t bucket = PTR_BUCKET_NR(c, k, i);
+			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
+
+			if (KEY_SIZE(k) + r > c->sb.bucket_size)
+				return "bad, length too big";
+			if (bucket < ca->sb.first_bucket)
+				return "bad, short offset";
+			if (bucket >= ca->sb.nbuckets)
+				return "bad, offset past end of device";
+			if (ptr_stale(c, k, i))
+				return "stale";
+		}
+
+	if (!bkey_cmp(k, &ZERO_KEY))
+		return "bad, null key";
+	if (!KEY_PTRS(k))
+		return "bad, no pointers";
+	if (!KEY_SIZE(k))
+		return "zeroed key";
+	return "";
+}
+
+void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
+{
+	unsigned i = 0;
+	char *out = buf, *end = buf + size;
+
+#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__))
+
+	p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k));
+
+	for (i = 0; i < KEY_PTRS(k); i++) {
+		if (i)
+			p(", ");
+
+		if (PTR_DEV(k, i) == PTR_CHECK_DEV)
+			p("check dev");
+		else
+			p("%llu:%llu gen %llu", PTR_DEV(k, i),
+			  PTR_OFFSET(k, i), PTR_GEN(k, i));
+	}
+
+	p("]");
+
+	if (KEY_DIRTY(k))
+		p(" dirty");
+	if (KEY_CSUM(k))
+		p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
+#undef p
+}
+
+static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
+{
+	struct btree *b = container_of(keys, struct btree, keys);
+	unsigned j;
+	char buf[80];
+
+	bch_extent_to_text(buf, sizeof(buf), k);
+	printk(" %s", buf);
+
+	for (j = 0; j < KEY_PTRS(k); j++) {
+		size_t n = PTR_BUCKET_NR(b->c, k, j);
+		printk(" bucket %zu", n);
+
+		if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
+			printk(" prio %i",
+			       PTR_BUCKET(b->c, k, j)->prio);
+	}
+
+	printk(" %s\n", bch_ptr_status(b->c, k));
+}
+
 /* Btree ptrs */
 
 bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
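bch_extent_to_text() scnprintf-formats a key as inode:start len N -> [dev:offset gen g, ...], with " dirty" and checksum suffixes when set, and never overruns the caller's buffer. Usage sketch (the pr_debug caller is hypothetical; the converted call sites follow below):

    char buf[80];

    bch_extent_to_text(buf, sizeof(buf), k);
    pr_debug("inserting extent %s", buf);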
@@ -76,7 +157,7 @@ bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
 
 	return false;
 bad:
-	bch_bkey_to_text(buf, sizeof(buf), k);
+	bch_extent_to_text(buf, sizeof(buf), k);
 	cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
 	return true;
 }
@@ -111,7 +192,7 @@ static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
 	return false;
 err:
 	mutex_unlock(&b->c->bucket_lock);
-	bch_bkey_to_text(buf, sizeof(buf), k);
+	bch_extent_to_text(buf, sizeof(buf), k);
 	btree_bug(b,
 "inconsistent btree pointer %s: bucket %li pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
 	  buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
@@ -145,6 +226,8 @@ const struct btree_keys_ops bch_btree_keys_ops = {
 	.sort_cmp	= bch_key_sort_cmp,
 	.key_invalid	= bch_btree_ptr_invalid,
 	.key_bad	= bch_btree_ptr_bad,
+	.key_to_text	= bch_extent_to_text,
+	.key_dump	= bch_bkey_dump,
 };
 
 /* Extents */
@@ -227,7 +310,7 @@ static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
 
 	return false;
 bad:
-	bch_bkey_to_text(buf, sizeof(buf), k);
+	bch_extent_to_text(buf, sizeof(buf), k);
 	cache_bug(b->c, "spotted extent %s: %s", buf, bch_ptr_status(b->c, k));
 	return true;
 }
@@ -254,7 +337,7 @@ static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
 	return false;
 err:
 	mutex_unlock(&b->c->bucket_lock);
-	bch_bkey_to_text(buf, sizeof(buf), k);
+	bch_extent_to_text(buf, sizeof(buf), k);
 	btree_bug(b,
 "inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
 	  buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
@@ -355,5 +438,7 @@ const struct btree_keys_ops bch_extent_keys_ops = {
 	.key_invalid	= bch_extent_invalid,
 	.key_bad	= bch_extent_bad,
 	.key_merge	= bch_extent_merge,
+	.key_to_text	= bch_extent_to_text,
+	.key_dump	= bch_bkey_dump,
 	.is_extents	= true,
 };
--- a/drivers/md/bcache/extents.h
+++ b/drivers/md/bcache/extents.h
@@ -7,6 +7,7 @@ extern const struct btree_keys_ops bch_extent_keys_ops;
 struct bkey;
 struct cache_set;
 
+void bch_extent_to_text(char *, size_t, const struct bkey *);
 bool __bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
 
 #endif /* _BCACHE_EXTENTS_H */
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -384,7 +384,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
 			break;
 	}
 
-	bch_bkey_to_text(buf, sizeof(buf), k);
+	bch_extent_to_text(buf, sizeof(buf), k);
 	pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf);
 
 	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -18,11 +18,13 @@ struct closure;
 
 #ifdef CONFIG_BCACHE_DEBUG
 
+#define EBUG_ON(cond)			BUG_ON(cond)
 #define atomic_dec_bug(v)	BUG_ON(atomic_dec_return(v) < 0)
 #define atomic_inc_bug(v, i)	BUG_ON(atomic_inc_return(v) <= i)
 
 #else /* DEBUG */
 
+#define EBUG_ON(cond)			do { if (cond); } while (0)
 #define atomic_dec_bug(v)	atomic_dec(v)
 #define atomic_inc_bug(v, i)	atomic_inc(v)
 
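EBUG_ON() moves here from debug.h, alongside the existing atomic_*_bug() helpers: with CONFIG_BCACHE_DEBUG it is a real BUG_ON(), while the do { if (cond); } while (0) form still parses and evaluates the condition but can never panic. For example, the assertion in bch_btree_sort_partial() above stays type-checked even in non-debug builds:

    EBUG_ON(b->written && oldsize >= 0 &&
            bch_count_data(&b->keys) != oldsize);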