md: remove PRINTK and dprintk debugging and use pr_debug
Being able to dynamically enable these makes them much more useful.

Signed-off-by: NeilBrown <neilb@suse.de>
commit 36a4e1fe0f
parent bdc04e6b15
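For context: PRINTK() and dprintk() were per-file macros that only produced output when a compile-time DEBUG define was non-zero, so turning the messages on meant rebuilding the module. pr_debug() (from <linux/printk.h>) keeps the call sites silent by default, but with CONFIG_DYNAMIC_DEBUG they can be switched on and off at run time through <debugfs>/dynamic_debug/control. The sketch below is illustrative only and is not part of this patch; the module name "prdebug_demo" is made up for the example.

#include <linux/module.h>
#include <linux/printk.h>

/* Minimal demo module: the pr_debug() call sites below are compiled in
 * but silent by default.  With CONFIG_DYNAMIC_DEBUG they can be enabled
 * at run time; without it, pr_debug() expands to nothing unless DEBUG
 * is #defined when this file is built. */
static int __init prdebug_demo_init(void)
{
	pr_debug("prdebug_demo: loaded (message off by default)\n");
	return 0;
}

static void __exit prdebug_demo_exit(void)
{
	pr_debug("prdebug_demo: unloaded\n");
}

module_init(prdebug_demo_init);
module_exit(prdebug_demo_exit);
MODULE_LICENSE("GPL");

With debugfs mounted, something like
	echo 'module prdebug_demo +p' > /sys/kernel/debug/dynamic_debug/control
turns those messages on, and '-p' turns them off again; the same mechanism applies to the md call sites touched by this patch (e.g. 'file md.c +p').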
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -50,14 +50,6 @@
 #define INJECT_FATAL_FAULT_3 0 /* undef */
 #endif
 
-#ifndef PRINTK
-#  if DEBUG > 0
-#    define PRINTK(x...) printk(KERN_DEBUG x)
-#  else
-#    define PRINTK(x...)
-#  endif
-#endif
-
 static inline char *bmname(struct bitmap *bitmap)
 {
 	return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
@@ -78,8 +70,8 @@ static unsigned char *bitmap_alloc_page(struct bitmap *bitmap)
 	if (!page)
 		printk("%s: bitmap_alloc_page FAILED\n", bmname(bitmap));
 	else
-		PRINTK("%s: bitmap_alloc_page: allocated page at %p\n",
-			bmname(bitmap), page);
+		pr_debug("%s: bitmap_alloc_page: allocated page at %p\n",
+			 bmname(bitmap), page);
 	return page;
 }
 
@@ -88,7 +80,7 @@ static unsigned char *bitmap_alloc_page(struct bitmap *bitmap)
  */
 static void bitmap_free_page(struct bitmap *bitmap, unsigned char *page)
 {
-	PRINTK("%s: bitmap_free_page: free page %p\n", bmname(bitmap), page);
+	pr_debug("%s: bitmap_free_page: free page %p\n", bmname(bitmap), page);
 	kfree(page);
 }
 
@@ -133,8 +125,8 @@ __acquires(bitmap->lock)
 	spin_lock_irq(&bitmap->lock);
 
 	if (mappage == NULL) {
-		PRINTK("%s: bitmap map page allocation failed, hijacking\n",
-			bmname(bitmap));
+		pr_debug("%s: bitmap map page allocation failed, hijacking\n",
+			 bmname(bitmap));
 		/* failed - set the hijacked flag so that we can use the
 		 * pointer as a counter */
 		if (!bitmap->bp[page].map)
@@ -409,8 +401,8 @@ static struct page *read_page(struct file *file, unsigned long index,
 	struct buffer_head *bh;
 	sector_t block;
 
-	PRINTK("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
-			(unsigned long long)index << PAGE_SHIFT);
+	pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
+		 (unsigned long long)index << PAGE_SHIFT);
 
 	page = alloc_page(GFP_KERNEL);
 	if (!page)
@@ -920,7 +912,7 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
 	else
 		__set_bit_le(bit, kaddr);
 	kunmap_atomic(kaddr, KM_USER0);
-	PRINTK("set file bit %lu page %lu\n", bit, page->index);
+	pr_debug("set file bit %lu page %lu\n", bit, page->index);
 	/* record page number so it gets flushed to disk when unplug occurs */
 	set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
 }
@@ -1364,8 +1356,8 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
 		if (bw > bitmap->behind_writes_used)
 			bitmap->behind_writes_used = bw;
 
-		PRINTK(KERN_DEBUG "inc write-behind count %d/%d\n",
-			bw, bitmap->max_write_behind);
+		pr_debug("inc write-behind count %d/%lu\n",
+			 bw, bitmap->mddev->bitmap_info.max_write_behind);
 	}
 
 	while (sectors) {
@@ -1424,8 +1416,9 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
 	if (behind) {
 		if (atomic_dec_and_test(&bitmap->behind_writes))
 			wake_up(&bitmap->behind_wait);
-		PRINTK(KERN_DEBUG "dec write-behind count %d/%d\n",
-			atomic_read(&bitmap->behind_writes), bitmap->max_write_behind);
+		pr_debug("dec write-behind count %d/%lu\n",
+			 atomic_read(&bitmap->behind_writes),
+			 bitmap->mddev->bitmap_info.max_write_behind);
 	}
 	if (bitmap->mddev->degraded)
 		/* Never clear bits or update events_cleared when degraded */
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -54,9 +54,6 @@
 #include "md.h"
 #include "bitmap.h"
 
-#define DEBUG 0
-#define dprintk(x...) ((void)(DEBUG && printk(x)))
-
 #ifndef MODULE
 static void autostart_arrays(int part);
 #endif
@@ -2442,27 +2439,23 @@ repeat:
 	sync_sbs(mddev, nospares);
 	spin_unlock_irq(&mddev->write_lock);
 
-	dprintk(KERN_INFO
-		"md: updating %s RAID superblock on device (in sync %d)\n",
-		mdname(mddev),mddev->in_sync);
+	pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
+		 mdname(mddev), mddev->in_sync);
 
 	bitmap_update_sb(mddev->bitmap);
 	list_for_each_entry(rdev, &mddev->disks, same_set) {
 		char b[BDEVNAME_SIZE];
-		dprintk(KERN_INFO "md: ");
+
 		if (rdev->sb_loaded != 1)
 			continue; /* no noise on spare devices */
-		if (test_bit(Faulty, &rdev->flags))
-			dprintk("(skipping faulty ");
 
-		dprintk("%s ", bdevname(rdev->bdev,b));
 		if (!test_bit(Faulty, &rdev->flags)) {
 			md_super_write(mddev,rdev,
 				       rdev->sb_start, rdev->sb_size,
 				       rdev->sb_page);
-			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
-				bdevname(rdev->bdev,b),
-				(unsigned long long)rdev->sb_start);
+			pr_debug("md: (write) %s's sb offset: %llu\n",
+				 bdevname(rdev->bdev, b),
+				 (unsigned long long)rdev->sb_start);
 			rdev->sb_events = mddev->events;
 			if (rdev->badblocks.size) {
 				md_super_write(mddev, rdev,
@@ -2473,7 +2466,8 @@ repeat:
 			}
 
 		} else
-			dprintk(")\n");
+			pr_debug("md: %s (skipping faulty)\n",
+				 bdevname(rdev->bdev, b));
 		if (mddev->level == LEVEL_MULTIPATH)
 			/* only need to write one superblock... */
 			break;
@@ -6408,7 +6402,7 @@ static int md_thread(void * arg)
 void md_wakeup_thread(mdk_thread_t *thread)
 {
 	if (thread) {
-		dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
+		pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
 		set_bit(THREAD_WAKEUP, &thread->flags);
 		wake_up(&thread->wqueue);
 	}
@@ -6444,7 +6438,7 @@ void md_unregister_thread(mdk_thread_t **threadp)
 	mdk_thread_t *thread = *threadp;
 	if (!thread)
 		return;
-	dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
+	pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
 	/* Locking ensures that mddev_unlock does not wake_up a
 	 * non-existent thread
 	 */
@@ -8112,7 +8106,7 @@ static struct notifier_block md_notifier = {
 
 static void md_geninit(void)
 {
-	dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
+	pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
 
 	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
 }
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -40,9 +40,6 @@
 #include "raid1.h"
 #include "bitmap.h"
 
-#define DEBUG 0
-#define PRINTK(x...) do { if (DEBUG) printk(x); } while (0)
-
 /*
  * Number of guaranteed r1bios in case of extreme VM load:
  */
@@ -246,11 +243,11 @@ static void raid_end_bio_io(r1bio_t *r1_bio)
 
 	/* if nobody has done the final endio yet, do it now */
 	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
-		PRINTK(KERN_DEBUG "raid1: sync end %s on sectors %llu-%llu\n",
-			(bio_data_dir(bio) == WRITE) ? "write" : "read",
-			(unsigned long long) bio->bi_sector,
-			(unsigned long long) bio->bi_sector +
-			(bio->bi_size >> 9) - 1);
+		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
+			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
+			 (unsigned long long) bio->bi_sector,
+			 (unsigned long long) bio->bi_sector +
+			 (bio->bi_size >> 9) - 1);
 
 		call_bio_endio(r1_bio);
 	}
@@ -431,10 +428,11 @@ static void raid1_end_write_request(struct bio *bio, int error)
 		/* Maybe we can return now */
 		if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
 			struct bio *mbio = r1_bio->master_bio;
-			PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n",
-				(unsigned long long) mbio->bi_sector,
-				(unsigned long long) mbio->bi_sector +
-				(mbio->bi_size >> 9) - 1);
+			pr_debug("raid1: behind end write sectors"
+				 " %llu-%llu\n",
+				 (unsigned long long) mbio->bi_sector,
+				 (unsigned long long) mbio->bi_sector +
+				 (mbio->bi_size >> 9) - 1);
 			call_bio_endio(r1_bio);
 		}
 	}
@@ -795,7 +793,7 @@ do_sync_io:
 		if (bvecs[i].bv_page)
 			put_page(bvecs[i].bv_page);
 	kfree(bvecs);
-	PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
+	pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
 }
 
 static int make_request(mddev_t *mddev, struct bio * bio)