2005-06-22 08:17:14 +08:00
|
|
|
/*
|
|
|
|
* bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
|
|
|
|
*
|
|
|
|
* bitmap_create - sets up the bitmap structure
|
|
|
|
* bitmap_destroy - destroys the bitmap structure
|
|
|
|
*
|
|
|
|
* additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
|
|
|
|
* - added disk storage for bitmap
|
|
|
|
* - changes to allow various bitmap chunk sizes
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Still to do:
|
|
|
|
*
|
|
|
|
* flush after percent set rather than just time based. (maybe both).
|
|
|
|
*/
|
|
|
|
|
2009-03-31 11:33:13 +08:00
|
|
|
#include <linux/blkdev.h>
|
2005-06-22 08:17:14 +08:00
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/timer.h>
|
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/list.h>
|
|
|
|
#include <linux/file.h>
|
|
|
|
#include <linux/mount.h>
|
|
|
|
#include <linux/buffer_head.h>
|
2009-03-31 11:33:13 +08:00
|
|
|
#include "md.h"
|
2009-03-31 11:27:03 +08:00
|
|
|
#include "bitmap.h"
|
2005-06-22 08:17:14 +08:00
|
|
|
|
2010-06-01 17:37:31 +08:00
|
|
|
static inline char *bmname(struct bitmap *bitmap)
|
2005-06-22 08:17:14 +08:00
|
|
|
{
|
|
|
|
return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* just a placeholder - calls kmalloc for bitmap pages
|
|
|
|
*/
|
|
|
|
static unsigned char *bitmap_alloc_page(struct bitmap *bitmap)
|
|
|
|
{
|
|
|
|
unsigned char *page;
|
|
|
|
|
2010-06-01 17:37:31 +08:00
|
|
|
page = kzalloc(PAGE_SIZE, GFP_NOIO);
|
2005-06-22 08:17:14 +08:00
|
|
|
if (!page)
|
|
|
|
printk("%s: bitmap_alloc_page FAILED\n", bmname(bitmap));
|
|
|
|
else
|
2011-10-07 11:23:17 +08:00
|
|
|
pr_debug("%s: bitmap_alloc_page: allocated page at %p\n",
|
|
|
|
bmname(bitmap), page);
|
2005-06-22 08:17:14 +08:00
|
|
|
return page;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* for now just a placeholder -- just calls kfree for bitmap pages
|
|
|
|
*/
|
|
|
|
static void bitmap_free_page(struct bitmap *bitmap, unsigned char *page)
{
	/* release a page obtained from bitmap_alloc_page() */
	pr_debug("%s: bitmap_free_page: free page %p\n", bmname(bitmap), page);
	kfree(page);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* check a page and, if necessary, allocate it (or hijack it if the alloc fails)
|
|
|
|
*
|
|
|
|
* 1) check to see if this page is allocated, if it's not then try to alloc
|
|
|
|
* 2) if the alloc fails, set the page's hijacked flag so we'll use the
|
|
|
|
* page pointer directly as a counter
|
|
|
|
*
|
|
|
|
* if we find our page, we increment the page's refcount so that it stays
|
|
|
|
* allocated while we're using it
|
|
|
|
*/
|
2010-06-01 17:37:31 +08:00
|
|
|
static int bitmap_checkpage(struct bitmap *bitmap,
			    unsigned long page, int create)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
	unsigned char *mappage;

	if (page >= bitmap->pages) {
		/* This can happen if bitmap_start_sync goes beyond
		 * End-of-device while looking for a whole page.
		 * It is harmless.
		 */
		return -EINVAL;
	}

	if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
		return 0;

	if (bitmap->bp[page].map) /* page is already allocated, just return */
		return 0;

	if (!create)
		return -ENOENT;

	/* this page has not been allocated yet */

	/* Drop the lock around the allocation: bitmap_alloc_page() may
	 * sleep (kzalloc), and another thread can race in and populate
	 * or hijack this slot in the meantime -- hence the re-checks
	 * below once the lock is re-taken. */
	spin_unlock_irq(&bitmap->lock);
	mappage = bitmap_alloc_page(bitmap);
	spin_lock_irq(&bitmap->lock);

	if (mappage == NULL) {
		pr_debug("%s: bitmap map page allocation failed, hijacking\n",
			 bmname(bitmap));
		/* failed - set the hijacked flag so that we can use the
		 * pointer as a counter */
		if (!bitmap->bp[page].map)
			bitmap->bp[page].hijacked = 1;
	} else if (bitmap->bp[page].map ||
		   bitmap->bp[page].hijacked) {
		/* somebody beat us to getting the page */
		bitmap_free_page(bitmap, mappage);
		return 0;
	} else {

		/* no page was in place and we have one, so install it */

		bitmap->bp[page].map = mappage;
		bitmap->missing_pages--;
	}
	return 0;
}
|
|
|
|
|
|
|
|
/* if page is completely empty, put it back on the free list, or dealloc it */
|
|
|
|
/* if page was hijacked, unmark the flag so it might get alloced next time */
|
|
|
|
/* Note: lock should be held when calling this */
|
2006-01-15 05:20:43 +08:00
|
|
|
static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
|
2005-06-22 08:17:14 +08:00
|
|
|
{
|
|
|
|
char *ptr;
|
|
|
|
|
|
|
|
if (bitmap->bp[page].count) /* page is still busy */
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* page is no longer in use, it can be released */
|
|
|
|
|
|
|
|
if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
|
|
|
|
bitmap->bp[page].hijacked = 0;
|
|
|
|
bitmap->bp[page].map = NULL;
|
2010-06-01 17:37:31 +08:00
|
|
|
} else {
|
|
|
|
/* normal case, free the page */
|
|
|
|
ptr = bitmap->bp[page].map;
|
|
|
|
bitmap->bp[page].map = NULL;
|
|
|
|
bitmap->missing_pages++;
|
|
|
|
bitmap_free_page(bitmap, ptr);
|
2005-06-22 08:17:14 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* bitmap file handling - read and write the bitmap file and its superblock
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* basic page I/O operations
|
|
|
|
*/
|
|
|
|
|
2005-06-22 08:17:27 +08:00
|
|
|
/* IO operations when bitmap is stored near all superblocks */
|
2011-10-11 13:47:53 +08:00
|
|
|
static struct page *read_sb_page(struct mddev *mddev, loff_t offset,
				 struct page *page,
				 unsigned long index, int size)
{
	/* choose a good rdev and read the page from there */

	struct md_rdev *rdev;
	sector_t target;
	int did_alloc = 0;

	if (!page) {
		/* caller did not supply a page; allocate one and remember
		 * to release it again on total failure */
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return ERR_PTR(-ENOMEM);
		did_alloc = 1;
	}

	/* try each in-sync, non-faulty member in turn; the first device
	 * whose read succeeds provides the data */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (! test_bit(In_sync, &rdev->flags)
		    || test_bit(Faulty, &rdev->flags))
			continue;

		/* bitmap pages live at @offset sectors from the superblock */
		target = offset + index * (PAGE_SIZE/512);

		if (sync_page_io(rdev, target,
				 roundup(size, bdev_logical_block_size(rdev->bdev)),
				 page, READ, true)) {
			page->index = index;
			attach_page_buffers(page, NULL); /* so that free_buffer will
							  * quietly no-op */
			return page;
		}
	}
	/* no usable device could supply the page */
	if (did_alloc)
		put_page(page);
	return ERR_PTR(-EIO);

}
|
|
|
|
|
2011-10-11 13:47:53 +08:00
|
|
|
static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	/* Iterate the disks of an mddev, using rcu to protect access to the
	 * linked list, and raising the refcount of devices we return to ensure
	 * they don't disappear while in use.
	 * As devices are only added or removed when raid_disk is < 0 and
	 * nr_pending is 0 and In_sync is clear, the entries we return will
	 * still be in the same position on the list when we re-enter
	 * list_for_each_continue_rcu.
	 *
	 * NOTE: the returned rdev has nr_pending elevated; the caller must
	 * either pass it back into this function or drop the reference with
	 * rdev_dec_pending() itself.
	 */
	struct list_head *pos;
	rcu_read_lock();
	if (rdev == NULL)
		/* start at the beginning */
		pos = &mddev->disks;
	else {
		/* release the previous rdev and start from there. */
		rdev_dec_pending(rdev, mddev);
		pos = &rdev->same_set;
	}
	list_for_each_continue_rcu(pos, &mddev->disks) {
		rdev = list_entry(pos, struct md_rdev, same_set);
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* this is a usable devices */
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			return rdev;
		}
	}
	rcu_read_unlock();
	return NULL;
}
|
|
|
|
|
2007-05-24 04:58:10 +08:00
|
|
|
static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
|
2005-06-22 08:17:27 +08:00
|
|
|
{
|
2011-10-11 13:45:26 +08:00
|
|
|
struct md_rdev *rdev = NULL;
|
2011-01-14 06:14:34 +08:00
|
|
|
struct block_device *bdev;
|
2011-10-11 13:47:53 +08:00
|
|
|
struct mddev *mddev = bitmap->mddev;
|
2005-06-22 08:17:27 +08:00
|
|
|
|
2008-09-01 10:48:13 +08:00
|
|
|
while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
|
2010-06-01 17:37:31 +08:00
|
|
|
int size = PAGE_SIZE;
|
|
|
|
loff_t offset = mddev->bitmap_info.offset;
|
2011-01-14 06:14:34 +08:00
|
|
|
|
|
|
|
bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;
|
|
|
|
|
2010-06-01 17:37:31 +08:00
|
|
|
if (page->index == bitmap->file_pages-1)
|
|
|
|
size = roundup(bitmap->last_page_size,
|
2011-01-14 06:14:34 +08:00
|
|
|
bdev_logical_block_size(bdev));
|
2010-06-01 17:37:31 +08:00
|
|
|
/* Just make sure we aren't corrupting data or
|
|
|
|
* metadata
|
|
|
|
*/
|
|
|
|
if (mddev->external) {
|
|
|
|
/* Bitmap could be anywhere. */
|
|
|
|
if (rdev->sb_start + offset + (page->index
|
|
|
|
* (PAGE_SIZE/512))
|
|
|
|
> rdev->data_offset
|
|
|
|
&&
|
|
|
|
rdev->sb_start + offset
|
|
|
|
< (rdev->data_offset + mddev->dev_sectors
|
|
|
|
+ (PAGE_SIZE/512)))
|
|
|
|
goto bad_alignment;
|
|
|
|
} else if (offset < 0) {
|
|
|
|
/* DATA BITMAP METADATA */
|
|
|
|
if (offset
|
|
|
|
+ (long)(page->index * (PAGE_SIZE/512))
|
|
|
|
+ size/512 > 0)
|
|
|
|
/* bitmap runs in to metadata */
|
|
|
|
goto bad_alignment;
|
|
|
|
if (rdev->data_offset + mddev->dev_sectors
|
|
|
|
> rdev->sb_start + offset)
|
|
|
|
/* data runs in to bitmap */
|
|
|
|
goto bad_alignment;
|
|
|
|
} else if (rdev->sb_start < rdev->data_offset) {
|
|
|
|
/* METADATA BITMAP DATA */
|
|
|
|
if (rdev->sb_start
|
|
|
|
+ offset
|
|
|
|
+ page->index*(PAGE_SIZE/512) + size/512
|
|
|
|
> rdev->data_offset)
|
|
|
|
/* bitmap runs in to data */
|
|
|
|
goto bad_alignment;
|
|
|
|
} else {
|
|
|
|
/* DATA METADATA BITMAP - no problems */
|
|
|
|
}
|
|
|
|
md_super_write(mddev, rdev,
|
|
|
|
rdev->sb_start + offset
|
|
|
|
+ page->index * (PAGE_SIZE/512),
|
|
|
|
size,
|
|
|
|
page);
|
2008-09-01 10:48:13 +08:00
|
|
|
}
|
2005-06-22 08:17:27 +08:00
|
|
|
|
|
|
|
if (wait)
|
[PATCH] md: support BIO_RW_BARRIER for md/raid1
We can only accept BARRIER requests if all slaves handle
barriers, and that can, of course, change with time....
So we keep track of whether the whole array seems safe for barriers,
and also whether each individual rdev handles barriers.
We initially assumes barriers are OK.
When writing the superblock we try a barrier, and if that fails, we flag
things for no-barriers. This will usually clear the flags fairly quickly.
If writing the superblock finds that BIO_RW_BARRIER is -ENOTSUPP, we need to
resubmit, so introduce function "md_super_wait" which waits for requests to
finish, and retries ENOTSUPP requests without the barrier flag.
When writing the real raid1, write requests which were BIO_RW_BARRIER but
which aresn't supported need to be retried. So raid1d is enhanced to do this,
and when any bio write completes (i.e. no retry needed) we remove it from the
r1bio, so that devices needing retry are easy to find.
We should hardly ever get -ENOTSUPP errors when writing data to the raid.
It should only happen if:
1/ the device used to support BARRIER, but now doesn't. Few devices
change like this, though raid1 can!
or
2/ the array has no persistent superblock, so there was no opportunity to
pre-test for barriers when writing the superblock.
Signed-off-by: Neil Brown <neilb@cse.unsw.edu.au>
Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-11-09 13:39:34 +08:00
|
|
|
md_super_wait(mddev);
|
2005-06-22 08:17:27 +08:00
|
|
|
return 0;
|
2008-07-21 15:05:25 +08:00
|
|
|
|
|
|
|
bad_alignment:
|
|
|
|
return -EINVAL;
|
2005-06-22 08:17:27 +08:00
|
|
|
}
|
|
|
|
|
2007-07-17 19:06:13 +08:00
|
|
|
static void bitmap_file_kick(struct bitmap *bitmap);
|
2005-06-22 08:17:14 +08:00
|
|
|
/*
|
2005-06-22 08:17:27 +08:00
|
|
|
* write out a page to a file
|
2005-06-22 08:17:14 +08:00
|
|
|
*/
|
2007-07-17 19:06:13 +08:00
|
|
|
static void write_page(struct bitmap *bitmap, struct page *page, int wait)
{
	struct buffer_head *bh;

	if (bitmap->file == NULL) {
		/* bitmap is stored near the md superblocks, not in a file */
		switch (write_sb_page(bitmap, page, wait)) {
		case -EINVAL:
			bitmap->flags |= BITMAP_WRITE_ERROR;
		}
	} else {

		/* file-backed bitmap: submit each buffer_head that was
		 * mapped (b_blocknr != 0) by read_page(), bypassing the
		 * filesystem entirely */
		bh = page_buffers(page);

		while (bh && bh->b_blocknr) {
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(WRITE | REQ_SYNC, bh);
			bh = bh->b_this_page;
		}

		/* completions are counted down in end_bitmap_write() */
		if (wait)
			wait_event(bitmap->write_wait,
				   atomic_read(&bitmap->pending_writes)==0);
	}
	if (bitmap->flags & BITMAP_WRITE_ERROR)
		bitmap_file_kick(bitmap);
}
|
|
|
|
|
|
|
|
static void end_bitmap_write(struct buffer_head *bh, int uptodate)
|
|
|
|
{
|
|
|
|
struct bitmap *bitmap = bh->b_private;
|
|
|
|
unsigned long flags;
|
2005-06-22 08:17:14 +08:00
|
|
|
|
2006-06-26 15:27:48 +08:00
|
|
|
if (!uptodate) {
|
|
|
|
spin_lock_irqsave(&bitmap->lock, flags);
|
|
|
|
bitmap->flags |= BITMAP_WRITE_ERROR;
|
|
|
|
spin_unlock_irqrestore(&bitmap->lock, flags);
|
2005-06-22 08:17:14 +08:00
|
|
|
}
|
2006-06-26 15:27:48 +08:00
|
|
|
if (atomic_dec_and_test(&bitmap->pending_writes))
|
|
|
|
wake_up(&bitmap->write_wait);
|
|
|
|
}
|
2005-06-22 08:17:14 +08:00
|
|
|
|
2006-06-26 15:27:48 +08:00
|
|
|
/* copied from buffer.c */
|
|
|
|
static void
__clear_page_buffers(struct page *page)
{
	/* detach the page's buffer-head state and drop the reference that
	 * the private flag held */
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}
|
|
|
|
static void free_buffers(struct page *page)
|
|
|
|
{
|
|
|
|
struct buffer_head *bh = page_buffers(page);
|
2005-06-22 08:17:21 +08:00
|
|
|
|
2006-06-26 15:27:48 +08:00
|
|
|
while (bh) {
|
|
|
|
struct buffer_head *next = bh->b_this_page;
|
|
|
|
free_buffer_head(bh);
|
|
|
|
bh = next;
|
2005-06-22 08:17:21 +08:00
|
|
|
}
|
2006-06-26 15:27:48 +08:00
|
|
|
__clear_page_buffers(page);
|
|
|
|
put_page(page);
|
2005-06-22 08:17:14 +08:00
|
|
|
}
|
|
|
|
|
2006-06-26 15:27:48 +08:00
|
|
|
/* read a page from a file.
|
|
|
|
* We both read the page, and attach buffers to the page to record the
|
|
|
|
* address of each block (using bmap). These addresses will be used
|
|
|
|
* to write the block later, completely bypassing the filesystem.
|
|
|
|
* This usage is similar to how swap files are handled, and allows us
|
|
|
|
* to write to a file with no concerns of memory allocation failing.
|
|
|
|
*/
|
2005-06-22 08:17:14 +08:00
|
|
|
static struct page *read_page(struct file *file, unsigned long index,
			      struct bitmap *bitmap,
			      unsigned long count)
{
	struct page *page = NULL;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct buffer_head *bh;
	sector_t block;

	pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
		 (unsigned long long)index << PAGE_SHIFT);

	page = alloc_page(GFP_KERNEL);
	if (!page)
		page = ERR_PTR(-ENOMEM);
	if (IS_ERR(page))
		goto out;

	/* one buffer_head per filesystem block within the page */
	bh = alloc_page_buffers(page, 1<<inode->i_blkbits, 0);
	if (!bh) {
		put_page(page);
		page = ERR_PTR(-ENOMEM);
		goto out;
	}
	attach_page_buffers(page, bh);
	/* first file block covered by this page */
	block = index << (PAGE_SHIFT - inode->i_blkbits);
	while (bh) {
		if (count == 0)
			/* past the requested bytes: leave unmapped so
			 * write_page() skips this buffer */
			bh->b_blocknr = 0;
		else {
			/* resolve the file block to a device block; the
			 * recorded address is reused for later writes,
			 * bypassing the filesystem */
			bh->b_blocknr = bmap(inode, block);
			if (bh->b_blocknr == 0) {
				/* Cannot use this file! */
				free_buffers(page);
				page = ERR_PTR(-EINVAL);
				goto out;
			}
			bh->b_bdev = inode->i_sb->s_bdev;
			if (count < (1<<inode->i_blkbits))
				count = 0;
			else
				count -= (1<<inode->i_blkbits);

			bh->b_end_io = end_bitmap_write;
			bh->b_private = bitmap;
			/* reads share the pending_writes counter and
			 * completion handler with the write path */
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(READ, bh);
		}
		block++;
		bh = bh->b_this_page;
	}
	page->index = index;

	wait_event(bitmap->write_wait,
		   atomic_read(&bitmap->pending_writes)==0);
	if (bitmap->flags & BITMAP_WRITE_ERROR) {
		free_buffers(page);
		page = ERR_PTR(-EIO);
	}
out:
	if (IS_ERR(page))
		printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %ld\n",
		       (int)PAGE_SIZE,
		       (unsigned long long)index << PAGE_SHIFT,
		       PTR_ERR(page));
	return page;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* bitmap file superblock operations
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* update the event counter and sync the superblock to disk */
|
2007-07-17 19:06:13 +08:00
|
|
|
void bitmap_update_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;
	unsigned long flags;

	if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
		return;
	if (bitmap->mddev->bitmap_info.external)
		/* externally-managed metadata: nothing for us to write */
		return;
	spin_lock_irqsave(&bitmap->lock, flags);
	if (!bitmap->sb_page) { /* no superblock */
		spin_unlock_irqrestore(&bitmap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&bitmap->lock, flags);
	sb = kmap_atomic(bitmap->sb_page, KM_USER0);
	sb->events = cpu_to_le64(bitmap->mddev->events);
	if (bitmap->mddev->events < bitmap->events_cleared)
		/* rocking back to read-only */
		bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
	sb->state = cpu_to_le32(bitmap->flags);
	/* Just in case these have been changed via sysfs: */
	sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
	sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
	kunmap_atomic(sb, KM_USER0);
	/* synchronous write (wait=1) of the superblock page */
	write_page(bitmap, bitmap->sb_page, 1);
}
|
|
|
|
|
|
|
|
/* print out the bitmap file superblock */
|
|
|
|
void bitmap_print_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->sb_page)
		return;
	sb = kmap_atomic(bitmap->sb_page, KM_USER0);
	printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
	printk(KERN_DEBUG "         magic: %08x\n", le32_to_cpu(sb->magic));
	printk(KERN_DEBUG "       version: %d\n", le32_to_cpu(sb->version));
	printk(KERN_DEBUG "          uuid: %08x.%08x.%08x.%08x\n",
	       /* dump the 16-byte uuid as four 32-bit words */
	       *(__u32 *)(sb->uuid+0),
	       *(__u32 *)(sb->uuid+4),
	       *(__u32 *)(sb->uuid+8),
	       *(__u32 *)(sb->uuid+12));
	printk(KERN_DEBUG "        events: %llu\n",
	       (unsigned long long) le64_to_cpu(sb->events));
	printk(KERN_DEBUG "events cleared: %llu\n",
	       (unsigned long long) le64_to_cpu(sb->events_cleared));
	printk(KERN_DEBUG "         state: %08x\n", le32_to_cpu(sb->state));
	printk(KERN_DEBUG "     chunksize: %d B\n", le32_to_cpu(sb->chunksize));
	printk(KERN_DEBUG "  daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
	printk(KERN_DEBUG "     sync size: %llu KB\n",
	       (unsigned long long)le64_to_cpu(sb->sync_size)/2);
	printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind));
	kunmap_atomic(sb, KM_USER0);
}
|
|
|
|
|
2011-06-09 06:59:30 +08:00
|
|
|
/*
|
|
|
|
* bitmap_new_disk_sb
|
|
|
|
* @bitmap
|
|
|
|
*
|
|
|
|
* This function is somewhat the reverse of bitmap_read_sb. bitmap_read_sb
|
|
|
|
* reads and verifies the on-disk bitmap superblock and populates bitmap_info.
|
|
|
|
* This function verifies 'bitmap_info' and populates the on-disk bitmap
|
|
|
|
* structure, which is to be written to disk.
|
|
|
|
*
|
|
|
|
* Returns: 0 on success, -Exxx on error
|
|
|
|
*/
|
|
|
|
static int bitmap_new_disk_sb(struct bitmap *bitmap)
|
|
|
|
{
|
|
|
|
bitmap_super_t *sb;
|
|
|
|
unsigned long chunksize, daemon_sleep, write_behind;
|
|
|
|
int err = -EINVAL;
|
|
|
|
|
|
|
|
bitmap->sb_page = alloc_page(GFP_KERNEL);
|
|
|
|
if (IS_ERR(bitmap->sb_page)) {
|
|
|
|
err = PTR_ERR(bitmap->sb_page);
|
|
|
|
bitmap->sb_page = NULL;
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
bitmap->sb_page->index = 0;
|
|
|
|
|
|
|
|
sb = kmap_atomic(bitmap->sb_page, KM_USER0);
|
|
|
|
|
|
|
|
sb->magic = cpu_to_le32(BITMAP_MAGIC);
|
|
|
|
sb->version = cpu_to_le32(BITMAP_MAJOR_HI);
|
|
|
|
|
|
|
|
chunksize = bitmap->mddev->bitmap_info.chunksize;
|
|
|
|
BUG_ON(!chunksize);
|
|
|
|
if (!is_power_of_2(chunksize)) {
|
|
|
|
kunmap_atomic(sb, KM_USER0);
|
|
|
|
printk(KERN_ERR "bitmap chunksize not a power of 2\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
sb->chunksize = cpu_to_le32(chunksize);
|
|
|
|
|
|
|
|
daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
|
|
|
|
if (!daemon_sleep ||
|
|
|
|
(daemon_sleep < 1) || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
|
|
|
|
printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n");
|
|
|
|
daemon_sleep = 5 * HZ;
|
|
|
|
}
|
|
|
|
sb->daemon_sleep = cpu_to_le32(daemon_sleep);
|
|
|
|
bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* FIXME: write_behind for RAID1. If not specified, what
|
|
|
|
* is a good choice? We choose COUNTER_MAX / 2 arbitrarily.
|
|
|
|
*/
|
|
|
|
write_behind = bitmap->mddev->bitmap_info.max_write_behind;
|
|
|
|
if (write_behind > COUNTER_MAX)
|
|
|
|
write_behind = COUNTER_MAX / 2;
|
|
|
|
sb->write_behind = cpu_to_le32(write_behind);
|
|
|
|
bitmap->mddev->bitmap_info.max_write_behind = write_behind;
|
|
|
|
|
|
|
|
/* keep the array size field of the bitmap superblock up to date */
|
|
|
|
sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
|
|
|
|
|
|
|
|
memcpy(sb->uuid, bitmap->mddev->uuid, 16);
|
|
|
|
|
|
|
|
bitmap->flags |= BITMAP_STALE;
|
|
|
|
sb->state |= cpu_to_le32(BITMAP_STALE);
|
|
|
|
bitmap->events_cleared = bitmap->mddev->events;
|
|
|
|
sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
|
|
|
|
|
|
|
|
bitmap->flags |= BITMAP_HOSTENDIAN;
|
|
|
|
sb->version = cpu_to_le32(BITMAP_MAJOR_HOSTENDIAN);
|
|
|
|
|
|
|
|
kunmap_atomic(sb, KM_USER0);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-06-22 08:17:14 +08:00
|
|
|
/* read the superblock from the bitmap file and initialize some bitmap fields */
|
|
|
|
static int bitmap_read_sb(struct bitmap *bitmap)
{
	char *reason = NULL;
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;
	unsigned long long events;
	int err = -EINVAL;

	/* page 0 is the superblock, read it... */
	if (bitmap->file) {
		/* file-backed: clamp the read to the file size, at most
		 * one page */
		loff_t isize = i_size_read(bitmap->file->f_mapping->host);
		int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;

		bitmap->sb_page = read_page(bitmap->file, 0, bitmap, bytes);
	} else {
		/* stored near the md superblocks on the member devices */
		bitmap->sb_page = read_sb_page(bitmap->mddev,
					       bitmap->mddev->bitmap_info.offset,
					       NULL,
					       0, sizeof(bitmap_super_t));
	}
	if (IS_ERR(bitmap->sb_page)) {
		err = PTR_ERR(bitmap->sb_page);
		bitmap->sb_page = NULL;
		return err;
	}

	sb = kmap_atomic(bitmap->sb_page, KM_USER0);

	chunksize = le32_to_cpu(sb->chunksize);
	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
	write_behind = le32_to_cpu(sb->write_behind);

	/* verify that the bitmap-specific fields are valid */
	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
		reason = "bad magic";
	else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
		 le32_to_cpu(sb->version) > BITMAP_MAJOR_HI)
		reason = "unrecognized superblock version";
	else if (chunksize < 512)
		reason = "bitmap chunksize too small";
	else if (!is_power_of_2(chunksize))
		reason = "bitmap chunksize not a power of 2";
	else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
		reason = "daemon sleep period out of range";
	else if (write_behind > COUNTER_MAX)
		reason = "write-behind limit out of range (0 - 16383)";
	if (reason) {
		printk(KERN_INFO "%s: invalid bitmap file superblock: %s\n",
		       bmname(bitmap), reason);
		goto out;
	}

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	if (!bitmap->mddev->persistent)
		goto success;

	/*
	 * if we have a persistent array superblock, compare the
	 * bitmap's UUID and event counter to the mddev's
	 */
	if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
		printk(KERN_INFO "%s: bitmap superblock UUID mismatch\n",
		       bmname(bitmap));
		goto out;
	}
	events = le64_to_cpu(sb->events);
	if (events < bitmap->mddev->events) {
		/* bitmap missed some updates; force full resync */
		printk(KERN_INFO "%s: bitmap file is out of date (%llu < %llu) "
		       "-- forcing full recovery\n", bmname(bitmap), events,
		       (unsigned long long) bitmap->mddev->events);
		sb->state |= cpu_to_le32(BITMAP_STALE);
	}
success:
	/* assign fields using values from superblock */
	bitmap->mddev->bitmap_info.chunksize = chunksize;
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;
	bitmap->flags |= le32_to_cpu(sb->state);
	if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
		bitmap->flags |= BITMAP_HOSTENDIAN;
	bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
	if (bitmap->flags & BITMAP_STALE)
		bitmap->events_cleared = bitmap->mddev->events;
	err = 0;
out:
	kunmap_atomic(sb, KM_USER0);
	if (err)
		bitmap_print_sb(bitmap);
	return err;
}
|
|
|
|
|
|
|
|
/* operations understood by bitmap_mask_state() */
enum bitmap_mask_op {
	MASK_SET,
	MASK_UNSET
};
|
|
|
|
|
2007-07-17 19:06:13 +08:00
|
|
|
/* record the state of the bitmap in the superblock. Return the old value */
|
|
|
|
static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
			     enum bitmap_mask_op op)
{
	bitmap_super_t *sb;
	unsigned long flags;
	int old;

	/* take the lock only to check sb_page exists; the sb update below
	 * happens unlocked via kmap_atomic */
	spin_lock_irqsave(&bitmap->lock, flags);
	if (!bitmap->sb_page) { /* can't set the state */
		spin_unlock_irqrestore(&bitmap->lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&bitmap->lock, flags);
	sb = kmap_atomic(bitmap->sb_page, KM_USER0);
	/* remember the previous value of the requested bits */
	old = le32_to_cpu(sb->state) & bits;
	switch (op) {
	case MASK_SET:
		/* keep the in-memory flags and the on-disk state in step */
		sb->state |= cpu_to_le32(bits);
		bitmap->flags |= bits;
		break;
	case MASK_UNSET:
		sb->state &= cpu_to_le32(~bits);
		bitmap->flags &= ~bits;
		break;
	default:
		BUG();
	}
	kunmap_atomic(sb, KM_USER0);
	return old;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* general bitmap file operations
|
|
|
|
*/
|
|
|
|
|
2009-12-14 09:49:56 +08:00
|
|
|
/*
|
|
|
|
* on-disk bitmap:
|
|
|
|
*
|
|
|
|
* Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
|
|
|
|
* file a page at a time. There's a superblock at the start of the file.
|
|
|
|
*/
|
2005-06-22 08:17:14 +08:00
|
|
|
/* calculate the index of the page that contains this bit */
|
2009-12-14 09:49:56 +08:00
|
|
|
static inline unsigned long file_page_index(struct bitmap *bitmap, unsigned long chunk)
|
2005-06-22 08:17:14 +08:00
|
|
|
{
|
2009-12-14 09:49:56 +08:00
|
|
|
if (!bitmap->mddev->bitmap_info.external)
|
|
|
|
chunk += sizeof(bitmap_super_t) << 3;
|
|
|
|
return chunk >> PAGE_BIT_SHIFT;
|
2005-06-22 08:17:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* calculate the (bit) offset of this bit within a page */
|
2009-12-14 09:49:56 +08:00
|
|
|
static inline unsigned long file_page_offset(struct bitmap *bitmap, unsigned long chunk)
|
2005-06-22 08:17:14 +08:00
|
|
|
{
|
2009-12-14 09:49:56 +08:00
|
|
|
if (!bitmap->mddev->bitmap_info.external)
|
|
|
|
chunk += sizeof(bitmap_super_t) << 3;
|
|
|
|
return chunk & (PAGE_BITS - 1);
|
2005-06-22 08:17:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* return a pointer to the page in the filemap that contains the given bit
|
|
|
|
*
|
|
|
|
* this lookup is complicated by the fact that the bitmap sb might be exactly
|
|
|
|
* 1 page (e.g., x86) or less than 1 page -- so the bitmap might start on page
|
|
|
|
* 0 or page 1
|
|
|
|
*/
|
|
|
|
static inline struct page *filemap_get_page(struct bitmap *bitmap,
|
2011-07-27 09:00:37 +08:00
|
|
|
unsigned long chunk)
|
2005-06-22 08:17:14 +08:00
|
|
|
{
|
2010-06-01 17:37:31 +08:00
|
|
|
if (file_page_index(bitmap, chunk) >= bitmap->file_pages)
|
|
|
|
return NULL;
|
2009-12-14 09:49:56 +08:00
|
|
|
return bitmap->filemap[file_page_index(bitmap, chunk)
|
|
|
|
- file_page_index(bitmap, 0)];
|
2005-06-22 08:17:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Tear down the in-memory page cache of the bitmap file.
 * The filemap, attribute array and sb page are detached from the
 * bitmap under the lock, then released without it, so concurrent
 * users see either the full mapping or none of it.
 */
static void bitmap_file_unmap(struct bitmap *bitmap)
{
	struct page **map, *sb_page;
	unsigned long *attr;
	int pages;
	unsigned long flags;

	spin_lock_irqsave(&bitmap->lock, flags);
	map = bitmap->filemap;
	bitmap->filemap = NULL;
	attr = bitmap->filemap_attr;
	bitmap->filemap_attr = NULL;
	pages = bitmap->file_pages;
	bitmap->file_pages = 0;
	sb_page = bitmap->sb_page;
	bitmap->sb_page = NULL;
	spin_unlock_irqrestore(&bitmap->lock, flags);

	while (pages--)
		if (map[pages] != sb_page) /* 0 is sb_page, release it below */
			free_buffers(map[pages]);
	kfree(map);
	kfree(attr);

	if (sb_page)
		free_buffers(sb_page);
}
|
|
|
|
|
|
|
|
/* Detach and release the backing file of the bitmap.
 * Waits for in-flight writes to finish before unmapping, then drops
 * the page-cache pages and the file reference.
 */
static void bitmap_file_put(struct bitmap *bitmap)
{
	struct file *file;
	unsigned long flags;

	/* detach under the lock so no new I/O is started on the file */
	spin_lock_irqsave(&bitmap->lock, flags);
	file = bitmap->file;
	bitmap->file = NULL;
	spin_unlock_irqrestore(&bitmap->lock, flags);

	if (file)
		wait_event(bitmap->write_wait,
			   atomic_read(&bitmap->pending_writes)==0);
	bitmap_file_unmap(bitmap);

	if (file) {
		struct inode *inode = file->f_path.dentry->d_inode;
		/* drop any cached pages so a later re-open rereads disk */
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		fput(file);
	}
}
|
|
|
|
|
|
|
|
/*
 * bitmap_file_kick - if an error occurs while manipulating the bitmap file
 * then it is no longer reliable, so we stop using it and we mark the file
 * as failed in the superblock
 */
static void bitmap_file_kick(struct bitmap *bitmap)
{
	char *path, *ptr = NULL;

	/* only log/update once: bitmap_mask_state returns the previous
	 * STALE setting, so a second kick is a no-op here */
	if (bitmap_mask_state(bitmap, BITMAP_STALE, MASK_SET) == 0) {
		bitmap_update_sb(bitmap);

		if (bitmap->file) {
			path = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (path)
				ptr = d_path(&bitmap->file->f_path, path,
					     PAGE_SIZE);

			/* ptr may be an ERR_PTR if d_path failed */
			printk(KERN_ALERT
			      "%s: kicking failed bitmap file %s from array!\n",
			      bmname(bitmap), IS_ERR(ptr) ? "" : ptr);

			kfree(path);
		} else
			printk(KERN_ALERT
			       "%s: disabling internal bitmap due to errors\n",
			       bmname(bitmap));
	}

	bitmap_file_put(bitmap);

	return;
}
|
|
|
|
|
|
|
|
/* Per-filemap-page state bits, stored 4-per-page in filemap_attr
 * (indexed as (page->index << 2) + attr). */
enum bitmap_page_attr {
	BITMAP_PAGE_DIRTY = 0, /* there are set bits that need to be synced */
	BITMAP_PAGE_PENDING = 1, /* there are bits that are being cleaned.
				  * i.e. counter is 1 or 2. */
	BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
};
|
|
|
|
|
|
|
|
static inline void set_page_attr(struct bitmap *bitmap, struct page *page,
|
|
|
|
enum bitmap_page_attr attr)
|
|
|
|
{
|
2011-07-27 09:00:37 +08:00
|
|
|
__set_bit((page->index<<2) + attr, bitmap->filemap_attr);
|
2005-06-22 08:17:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void clear_page_attr(struct bitmap *bitmap, struct page *page,
|
|
|
|
enum bitmap_page_attr attr)
|
|
|
|
{
|
2011-07-27 09:00:37 +08:00
|
|
|
__clear_bit((page->index<<2) + attr, bitmap->filemap_attr);
|
2005-06-22 08:17:14 +08:00
|
|
|
}
|
|
|
|
|
2006-06-26 15:27:45 +08:00
|
|
|
static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *page,
|
|
|
|
enum bitmap_page_attr attr)
|
2005-06-22 08:17:14 +08:00
|
|
|
{
|
2011-07-27 09:00:37 +08:00
|
|
|
return test_bit((page->index<<2) + attr, bitmap->filemap_attr);
|
2005-06-22 08:17:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * bitmap_file_set_bit -- called before performing a write to the md device
 * to set (and eventually sync) a particular bit in the bitmap file
 *
 * we set the bit immediately, then we record the page number so that
 * when an unplug occurs, we can flush the dirty pages out to disk
 */
static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *kaddr;
	unsigned long chunk = block >> CHUNK_BLOCK_SHIFT(bitmap);

	/* no file-backed map (memory-only bitmap, or already torn down) */
	if (!bitmap->filemap)
		return;

	page = filemap_get_page(bitmap, chunk);
	if (!page)
		return;
	bit = file_page_offset(bitmap, chunk);

	/* set the bit */
	kaddr = kmap_atomic(page, KM_USER0);
	if (bitmap->flags & BITMAP_HOSTENDIAN)
		set_bit(bit, kaddr);
	else
		__set_bit_le(bit, kaddr);
	kunmap_atomic(kaddr, KM_USER0);
	pr_debug("set file bit %lu page %lu\n", bit, page->index);
	/* record page number so it gets flushed to disk when unplug occurs */
	set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
}
|
|
|
|
|
|
|
|
/* this gets called when the md device is ready to unplug its underlying
 * (slave) device queues -- before we let any writes go down, we need to
 * sync the dirty pages of the bitmap file to disk */
void bitmap_unplug(struct bitmap *bitmap)
{
	unsigned long i, flags;
	int dirty, need_write;
	struct page *page;
	int wait = 0;

	if (!bitmap)
		return;

	/* look at each page to see if there are any set bits that need to be
	 * flushed out to disk */
	for (i = 0; i < bitmap->file_pages; i++) {
		spin_lock_irqsave(&bitmap->lock, flags);
		if (!bitmap->filemap) {
			/* bitmap torn down underneath us -- nothing to flush */
			spin_unlock_irqrestore(&bitmap->lock, flags);
			return;
		}
		page = bitmap->filemap[i];
		/* snapshot and clear both attrs under the lock; the write
		 * itself must happen with the lock dropped */
		dirty = test_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
		need_write = test_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
		clear_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
		clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
		if (dirty)
			wait = 1;
		spin_unlock_irqrestore(&bitmap->lock, flags);

		if (dirty || need_write)
			write_page(bitmap, page, 0);
	}
	if (wait) { /* if any writes were performed, we need to wait on them */
		if (bitmap->file)
			wait_event(bitmap->write_wait,
				   atomic_read(&bitmap->pending_writes)==0);
		else
			md_super_wait(bitmap->mddev);
	}
	if (bitmap->flags & BITMAP_WRITE_ERROR)
		bitmap_file_kick(bitmap);
}
EXPORT_SYMBOL(bitmap_unplug);
|
2005-06-22 08:17:14 +08:00
|
|
|
|
2005-09-10 07:23:44 +08:00
|
|
|
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
/*
 * bitmap_init_from_disk -- called at bitmap_create time to initialize
 * the in-memory bitmap from the on-disk bitmap -- also, sets up the
 * memory mapping of the bitmap file
 * Special cases:
 *   if there's no bitmap file, or if the bitmap file had been
 *   previously kicked from the array, we mark all the bits as
 *   1's in order to cause a full resync.
 *
 * We ignore all bits for sectors that end earlier than 'start'.
 * This is used when reading an out-of-date bitmap...
 */
static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
{
	unsigned long i, chunks, index, oldindex, bit;
	struct page *page = NULL, *oldpage = NULL;
	unsigned long num_pages, bit_cnt = 0;
	struct file *file;
	unsigned long bytes, offset;
	int outofdate;
	int ret = -ENOSPC;
	void *paddr;

	chunks = bitmap->chunks;
	file = bitmap->file;

	/* the bitmap must live either in a file or at a device offset */
	BUG_ON(!file && !bitmap->mddev->bitmap_info.offset);

	outofdate = bitmap->flags & BITMAP_STALE;
	if (outofdate)
		printk(KERN_INFO "%s: bitmap file is out of date, doing full "
			"recovery\n", bmname(bitmap));

	/* one bit per chunk, plus the superblock for internal bitmaps */
	bytes = DIV_ROUND_UP(bitmap->chunks, 8);
	if (!bitmap->mddev->bitmap_info.external)
		bytes += sizeof(bitmap_super_t);

	num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);

	if (file && i_size_read(file->f_mapping->host) < bytes) {
		printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n",
			bmname(bitmap),
			(unsigned long) i_size_read(file->f_mapping->host),
			bytes);
		goto err;
	}

	ret = -ENOMEM;

	bitmap->filemap = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
	if (!bitmap->filemap)
		goto err;

	/* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */
	bitmap->filemap_attr = kzalloc(
		roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
		GFP_KERNEL);
	if (!bitmap->filemap_attr)
		goto err;

	oldindex = ~0L;	/* force a read on the first iteration */

	for (i = 0; i < chunks; i++) {
		int b;
		index = file_page_index(bitmap, i);
		bit = file_page_offset(bitmap, i);
		if (index != oldindex) { /* this is a new page, read it in */
			int count;
			/* unmap the old page, we're done with it */
			if (index == num_pages-1)
				count = bytes - index * PAGE_SIZE;
			else
				count = PAGE_SIZE;
			if (index == 0 && bitmap->sb_page) {
				/*
				 * if we're here then the superblock page
				 * contains some bits (PAGE_SIZE != sizeof sb)
				 * we've already read it in, so just use it
				 */
				page = bitmap->sb_page;
				offset = sizeof(bitmap_super_t);
				if (!file)
					page = read_sb_page(
						bitmap->mddev,
						bitmap->mddev->bitmap_info.offset,
						page,
						index, count);
			} else if (file) {
				page = read_page(file, index, bitmap, count);
				offset = 0;
			} else {
				page = read_sb_page(bitmap->mddev,
						    bitmap->mddev->bitmap_info.offset,
						    NULL,
						    index, count);
				offset = 0;
			}
			if (IS_ERR(page)) { /* read error */
				ret = PTR_ERR(page);
				goto err;
			}

			oldindex = index;
			oldpage = page;

			bitmap->filemap[bitmap->file_pages++] = page;
			bitmap->last_page_size = count;

			if (outofdate) {
				/*
				 * if bitmap is out of date, dirty the
				 * whole page and write it out
				 */
				paddr = kmap_atomic(page, KM_USER0);
				memset(paddr + offset, 0xff,
				       PAGE_SIZE - offset);
				kunmap_atomic(paddr, KM_USER0);
				write_page(bitmap, page, 1);

				ret = -EIO;
				if (bitmap->flags & BITMAP_WRITE_ERROR)
					goto err;
			}
		}
		paddr = kmap_atomic(page, KM_USER0);
		if (bitmap->flags & BITMAP_HOSTENDIAN)
			b = test_bit(bit, paddr);
		else
			b = test_bit_le(bit, paddr);
		kunmap_atomic(paddr, KM_USER0);
		if (b) {
			/* if the disk bit is set, set the memory bit */
			int needed = ((sector_t)(i+1) << (CHUNK_BLOCK_SHIFT(bitmap))
				      >= start);
			bitmap_set_memory_bits(bitmap,
					       (sector_t)i << CHUNK_BLOCK_SHIFT(bitmap),
					       needed);
			bit_cnt++;
		}
	}

	/* everything went OK */
	ret = 0;
	bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET);

	if (bit_cnt) { /* Kick recovery if any bits were set */
		set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
		md_wakeup_thread(bitmap->mddev->thread);
	}

	printk(KERN_INFO "%s: bitmap initialized from disk: "
	       "read %lu/%lu pages, set %lu of %lu bits\n",
	       bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt, chunks);

	return 0;

 err:
	printk(KERN_INFO "%s: bitmap initialisation failed: %d\n",
	       bmname(bitmap), ret);
	return ret;
}
|
|
|
|
|
2005-06-22 08:17:27 +08:00
|
|
|
void bitmap_write_all(struct bitmap *bitmap)
|
|
|
|
{
|
|
|
|
/* We don't actually write all bitmap blocks here,
|
|
|
|
* just flag them as needing to be written
|
|
|
|
*/
|
2006-06-26 15:27:45 +08:00
|
|
|
int i;
|
2005-06-22 08:17:27 +08:00
|
|
|
|
2011-11-23 07:18:52 +08:00
|
|
|
spin_lock_irq(&bitmap->lock);
|
2010-06-01 17:37:31 +08:00
|
|
|
for (i = 0; i < bitmap->file_pages; i++)
|
2006-06-26 15:27:45 +08:00
|
|
|
set_page_attr(bitmap, bitmap->filemap[i],
|
|
|
|
BITMAP_PAGE_NEEDWRITE);
|
2011-09-21 13:37:46 +08:00
|
|
|
bitmap->allclean = 0;
|
2011-11-23 07:18:52 +08:00
|
|
|
spin_unlock_irq(&bitmap->lock);
|
2005-06-22 08:17:27 +08:00
|
|
|
}
|
|
|
|
|
2005-06-22 08:17:14 +08:00
|
|
|
/* adjust the active-chunk count of the counter page covering 'offset'
 * by 'inc' (+1/-1) and free the page if it becomes unused */
static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
{
	sector_t chunk = offset >> CHUNK_BLOCK_SHIFT(bitmap);
	unsigned long pagenr = chunk >> PAGE_COUNTER_SHIFT;

	bitmap->bp[pagenr].count += inc;
	bitmap_checkfree(bitmap, pagenr);
}
|
|
|
|
static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
					    sector_t offset, sector_t *blocks,
					    int create);

/*
 * bitmap daemon -- periodically wakes up to clean bits and flush pages
 * out to disk
 */

void bitmap_daemon_work(struct mddev *mddev)
{
	struct bitmap *bitmap;
	unsigned long j;
	unsigned long flags;
	struct page *page = NULL, *lastpage = NULL;
	sector_t blocks;
	void *paddr;

	/* Use a mutex to guard daemon_work against
	 * bitmap_destroy.
	 */
	mutex_lock(&mddev->bitmap_info.mutex);
	bitmap = mddev->bitmap;
	if (bitmap == NULL) {
		mutex_unlock(&mddev->bitmap_info.mutex);
		return;
	}
	/* rate-limit: don't run more often than daemon_sleep */
	if (time_before(jiffies, bitmap->daemon_lastrun
			+ mddev->bitmap_info.daemon_sleep))
		goto done;

	bitmap->daemon_lastrun = jiffies;
	if (bitmap->allclean) {
		/* nothing to clean -- park the md thread until new work */
		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
		goto done;
	}
	/* optimistically assume this pass cleans everything; any code
	 * path that leaves work pending clears allclean again */
	bitmap->allclean = 1;

	spin_lock_irqsave(&bitmap->lock, flags);
	for (j = 0; j < bitmap->chunks; j++) {
		bitmap_counter_t *bmc;
		if (!bitmap->filemap)
			/* error or shutdown */
			break;

		page = filemap_get_page(bitmap, j);

		if (page != lastpage) {
			/* skip this page unless it's marked as needing cleaning */
			if (!test_page_attr(bitmap, page, BITMAP_PAGE_PENDING)) {
				int need_write = test_page_attr(bitmap, page,
								BITMAP_PAGE_NEEDWRITE);
				if (need_write)
					clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);

				/* drop the lock for the actual I/O */
				spin_unlock_irqrestore(&bitmap->lock, flags);
				if (need_write)
					write_page(bitmap, page, 0);
				spin_lock_irqsave(&bitmap->lock, flags);
				/* advance j to the last chunk of this page */
				j |= (PAGE_BITS - 1);
				continue;
			}

			/* grab the new page, sync and release the old */
			if (lastpage != NULL) {
				if (test_page_attr(bitmap, lastpage,
						   BITMAP_PAGE_NEEDWRITE)) {
					clear_page_attr(bitmap, lastpage,
							BITMAP_PAGE_NEEDWRITE);
					spin_unlock_irqrestore(&bitmap->lock, flags);
					write_page(bitmap, lastpage, 0);
				} else {
					/* defer the write to the next pass */
					set_page_attr(bitmap, lastpage,
						      BITMAP_PAGE_NEEDWRITE);
					bitmap->allclean = 0;
					spin_unlock_irqrestore(&bitmap->lock, flags);
				}
			} else
				spin_unlock_irqrestore(&bitmap->lock, flags);
			lastpage = page;

			/* We are possibly going to clear some bits, so make
			 * sure that events_cleared is up-to-date.
			 */
			if (bitmap->need_sync &&
			    mddev->bitmap_info.external == 0) {
				bitmap_super_t *sb;
				bitmap->need_sync = 0;
				sb = kmap_atomic(bitmap->sb_page, KM_USER0);
				sb->events_cleared =
					cpu_to_le64(bitmap->events_cleared);
				kunmap_atomic(sb, KM_USER0);
				write_page(bitmap, bitmap->sb_page, 1);
			}
			spin_lock_irqsave(&bitmap->lock, flags);
			if (!bitmap->need_sync)
				clear_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
			else
				bitmap->allclean = 0;
		}
		bmc = bitmap_get_counter(bitmap,
					 (sector_t)j << CHUNK_BLOCK_SHIFT(bitmap),
					 &blocks, 0);
		if (!bmc)
			/* no counter page -- skip the whole counter page */
			j |= PAGE_COUNTER_MASK;
		else if (*bmc) {
			if (*bmc == 1 && !bitmap->need_sync) {
				/* we can clear the bit */
				*bmc = 0;
				bitmap_count_page(bitmap,
						  (sector_t)j << CHUNK_BLOCK_SHIFT(bitmap),
						  -1);

				/* clear the bit */
				paddr = kmap_atomic(page, KM_USER0);
				if (bitmap->flags & BITMAP_HOSTENDIAN)
					clear_bit(file_page_offset(bitmap, j),
						  paddr);
				else
					__clear_bit_le(
						file_page_offset(bitmap,
								 j),
						paddr);
				kunmap_atomic(paddr, KM_USER0);
			} else if (*bmc <= 2) {
				*bmc = 1; /* maybe clear the bit next time */
				set_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
				bitmap->allclean = 0;
			}
		}
	}
	spin_unlock_irqrestore(&bitmap->lock, flags);

	/* now sync the final page */
	if (lastpage != NULL) {
		spin_lock_irqsave(&bitmap->lock, flags);
		if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
			clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
			spin_unlock_irqrestore(&bitmap->lock, flags);
			write_page(bitmap, lastpage, 0);
		} else {
			set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
			bitmap->allclean = 0;
			spin_unlock_irqrestore(&bitmap->lock, flags);
		}
	}

 done:
	if (bitmap->allclean == 0)
		mddev->thread->timeout =
			mddev->bitmap_info.daemon_sleep;
	mutex_unlock(&mddev->bitmap_info.mutex);
}
|
|
|
|
|
|
|
|
/* Return a pointer to the 16-bit counter covering 'offset', or NULL.
 * '*blocks' is set to the number of blocks covered by that counter
 * starting at 'offset' (so callers can advance in counter-sized steps
 * even when NULL is returned).
 */
static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
					    sector_t offset, sector_t *blocks,
					    int create)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
	/* If 'create', we might release the lock and reclaim it.
	 * The lock must have been taken with interrupts enabled.
	 * If !create, we don't release the lock.
	 */
	sector_t chunk = offset >> CHUNK_BLOCK_SHIFT(bitmap);
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
	sector_t csize;
	int err;

	err = bitmap_checkpage(bitmap, page, create);

	/* a hijacked or absent page covers a larger region per counter */
	if (bitmap->bp[page].hijacked ||
	    bitmap->bp[page].map == NULL)
		csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap) +
					  PAGE_COUNTER_SHIFT - 1);
	else
		csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap));
	/* blocks remaining inside this counter's region from 'offset' */
	*blocks = csize - (offset & (csize - 1));

	if (err < 0)
		return NULL;

	/* now locked ... */

	if (bitmap->bp[page].hijacked) { /* hijacked pointer */
		/* should we use the first or second counter field
		 * of the hijacked pointer? */
		int hi = (pageoff > PAGE_COUNTER_MASK);
		return &((bitmap_counter_t *)
			 &bitmap->bp[page].map)[hi];
	} else /* page is allocated */
		return (bitmap_counter_t *)
			&(bitmap->bp[page].map[pageoff]);
}
|
|
|
|
|
2005-09-10 07:23:47 +08:00
|
|
|
/* Account the start of a write covering [offset, offset+sectors):
 * bump the per-chunk counters (setting the on-disk bit the first time)
 * and, for write-behind requests, the behind_writes count.
 * Always returns 0.
 */
int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
{
	if (!bitmap)
		return 0;

	if (behind) {
		int bw;
		atomic_inc(&bitmap->behind_writes);
		bw = atomic_read(&bitmap->behind_writes);
		/* track the high-water mark for reporting */
		if (bw > bitmap->behind_writes_used)
			bitmap->behind_writes_used = bw;

		pr_debug("inc write-behind count %d/%lu\n",
			 bw, bitmap->mddev->bitmap_info.max_write_behind);
	}

	while (sectors) {
		sector_t blocks;
		bitmap_counter_t *bmc;

		spin_lock_irq(&bitmap->lock);
		bmc = bitmap_get_counter(bitmap, offset, &blocks, 1);
		if (!bmc) {
			spin_unlock_irq(&bitmap->lock);
			return 0;
		}

		if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
			/* counter saturated: sleep until an endwrite
			 * decrements it, then retry this chunk */
			DEFINE_WAIT(__wait);
			/* note that it is safe to do the prepare_to_wait
			 * after the test as long as we do it before dropping
			 * the spinlock.
			 */
			prepare_to_wait(&bitmap->overflow_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&bitmap->lock);
			io_schedule();
			finish_wait(&bitmap->overflow_wait, &__wait);
			continue;
		}

		switch (*bmc) {
		case 0:
			/* first write to this chunk: mark it dirty on disk */
			bitmap_file_set_bit(bitmap, offset);
			bitmap_count_page(bitmap, offset, 1);
			/* fall through */
		case 1:
			*bmc = 2;
		}

		(*bmc)++;

		spin_unlock_irq(&bitmap->lock);

		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
		else
			sectors = 0;
	}
	return 0;
}
EXPORT_SYMBOL(bitmap_startwrite);
|
2005-06-22 08:17:14 +08:00
|
|
|
|
|
|
|
/* Account the completion of a write started with bitmap_startwrite:
 * decrement the per-chunk counters, flag chunks that may soon be
 * cleanable, and wake waiters blocked on counter overflow.
 */
void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors,
		     int success, int behind)
{
	if (!bitmap)
		return;
	if (behind) {
		if (atomic_dec_and_test(&bitmap->behind_writes))
			wake_up(&bitmap->behind_wait);
		pr_debug("dec write-behind count %d/%lu\n",
			 atomic_read(&bitmap->behind_writes),
			 bitmap->mddev->bitmap_info.max_write_behind);
	}

	while (sectors) {
		sector_t blocks;
		unsigned long flags;
		bitmap_counter_t *bmc;

		spin_lock_irqsave(&bitmap->lock, flags);
		bmc = bitmap_get_counter(bitmap, offset, &blocks, 0);
		if (!bmc) {
			spin_unlock_irqrestore(&bitmap->lock, flags);
			return;
		}

		/* successful write on a healthy array: events_cleared can
		 * advance, and the superblock needs syncing before any
		 * bits are cleared */
		if (success && !bitmap->mddev->degraded &&
		    bitmap->events_cleared < bitmap->mddev->events) {
			bitmap->events_cleared = bitmap->mddev->events;
			bitmap->need_sync = 1;
			sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
		}

		/* failed write: remember this chunk needs resync */
		if (!success && !NEEDED(*bmc))
			*bmc |= NEEDED_MASK;

		if (COUNTER(*bmc) == COUNTER_MAX)
			wake_up(&bitmap->overflow_wait);

		(*bmc)--;
		if (*bmc <= 2) {
			/* counter low enough that the daemon may be able to
			 * clear the on-disk bit soon */
			set_page_attr(bitmap,
				      filemap_get_page(
					      bitmap,
					      offset >> CHUNK_BLOCK_SHIFT(bitmap)),
				      BITMAP_PAGE_PENDING);
			bitmap->allclean = 0;
		}
		spin_unlock_irqrestore(&bitmap->lock, flags);
		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
		else
			sectors = 0;
	}
}
EXPORT_SYMBOL(bitmap_endwrite);
|
2005-06-22 08:17:14 +08:00
|
|
|
|
2010-10-19 07:03:39 +08:00
|
|
|
/*
 * __bitmap_start_sync - test (and, if possible, claim) one counter's
 * range for resync.
 *
 * Sets *blocks to the number of blocks covered by the counter at
 * @offset.  Returns 1 if that range needs (or is already under) resync,
 * 0 otherwise.  When @degraded is 0 the NEEDED flag is converted to
 * RESYNC under the lock, claiming the range; when degraded we must not
 * alter the flags because the resync will not actually repair anything.
 */
static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
			       int degraded)
{
	bitmap_counter_t *bmc;
	int rv;
	if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
		*blocks = 1024;
		return 1; /* always resync if no bitmap */
	}
	spin_lock_irq(&bitmap->lock);
	bmc = bitmap_get_counter(bitmap, offset, blocks, 0);
	rv = 0;
	if (bmc) {
		/* locked */
		if (RESYNC(*bmc))
			rv = 1;
		else if (NEEDED(*bmc)) {
			rv = 1;
			if (!degraded) { /* don't set/clear bits if degraded */
				*bmc |= RESYNC_MASK;
				*bmc &= ~NEEDED_MASK;
			}
		}
	}
	spin_unlock_irq(&bitmap->lock);
	return rv;
}
|
|
|
|
|
2010-10-19 07:03:39 +08:00
|
|
|
/*
 * bitmap_start_sync - decide whether the region at @offset needs resync.
 *
 * bitmap_start_sync must always report on multiples of whole
 * pages, otherwise resync (which is very PAGE_SIZE based) will
 * get confused.
 * So call __bitmap_start_sync repeatedly (if needed) until
 * at least PAGE_SIZE>>9 blocks are covered.
 * Return the 'or' of the results; *blocks is set to the total
 * number of blocks actually covered.
 */
int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
		      int degraded)
{
	int needed = 0;
	sector_t covered;

	for (*blocks = 0; *blocks < (PAGE_SIZE >> 9); *blocks += covered) {
		needed |= __bitmap_start_sync(bitmap, offset,
					      &covered, degraded);
		offset += covered;
	}
	return needed;
}
EXPORT_SYMBOL(bitmap_start_sync);
|
2009-03-31 11:27:02 +08:00
|
|
|
|
2010-10-19 07:03:39 +08:00
|
|
|
/*
 * bitmap_end_sync - record completion (or abort) of resync for the
 * counter range containing @offset.
 *
 * *blocks is set to the size of the range covered by the counter.
 * On a completed resync the RESYNC flag is dropped; if @aborted, the
 * range is re-flagged NEEDED so a future resync retries it, otherwise
 * an idle chunk (counter <= 2) is marked pending for the daemon to
 * clear on disk.
 */
void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
{
	bitmap_counter_t *bmc;
	unsigned long flags;

	if (bitmap == NULL) {
		/* no bitmap: pretend a large range so callers advance */
		*blocks = 1024;
		return;
	}
	spin_lock_irqsave(&bitmap->lock, flags);
	bmc = bitmap_get_counter(bitmap, offset, blocks, 0);
	if (bmc == NULL)
		goto unlock;
	/* locked */
	if (RESYNC(*bmc)) {
		*bmc &= ~RESYNC_MASK;

		if (!NEEDED(*bmc) && aborted)
			/* resync did not finish: queue it for retry */
			*bmc |= NEEDED_MASK;
		else {
			if (*bmc <= 2) {
				/* chunk idle: candidate for on-disk clear */
				set_page_attr(bitmap,
					      filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)),
					      BITMAP_PAGE_PENDING);
				bitmap->allclean = 0;
			}
		}
	}
 unlock:
	spin_unlock_irqrestore(&bitmap->lock, flags);
}
EXPORT_SYMBOL(bitmap_end_sync);
|
2005-06-22 08:17:14 +08:00
|
|
|
|
|
|
|
void bitmap_close_sync(struct bitmap *bitmap)
|
|
|
|
{
|
|
|
|
/* Sync has finished, and any bitmap chunks that weren't synced
|
|
|
|
* properly have been aborted. It remains to us to clear the
|
|
|
|
* RESYNC bit wherever it is still on
|
|
|
|
*/
|
|
|
|
sector_t sector = 0;
|
2010-10-19 07:03:39 +08:00
|
|
|
sector_t blocks;
|
2008-02-06 17:39:50 +08:00
|
|
|
if (!bitmap)
|
|
|
|
return;
|
2005-06-22 08:17:14 +08:00
|
|
|
while (sector < bitmap->mddev->resync_max_sectors) {
|
|
|
|
bitmap_end_sync(bitmap, sector, &blocks, 0);
|
2008-02-06 17:39:50 +08:00
|
|
|
sector += blocks;
|
|
|
|
}
|
|
|
|
}
|
2010-06-01 17:37:31 +08:00
|
|
|
EXPORT_SYMBOL(bitmap_close_sync);
|
2008-02-06 17:39:50 +08:00
|
|
|
|
|
|
|
/*
 * bitmap_cond_end_sync - periodically clear RESYNC flags for the region
 * already resynced (everything below @sector), so an interrupted resync
 * does not have to repeat finished work.
 *
 * Rate-limited to once per daemon_sleep interval; @sector == 0 just
 * resets the timestamp.
 */
void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
{
	sector_t s = 0;
	sector_t blocks;

	if (!bitmap)
		return;
	if (sector == 0) {
		/* start of a resync pass: arm the rate limiter only */
		bitmap->last_end_sync = jiffies;
		return;
	}
	if (time_before(jiffies, (bitmap->last_end_sync
				  + bitmap->mddev->bitmap_info.daemon_sleep)))
		return;
	/* let in-flight resync requests drain before recording progress */
	wait_event(bitmap->mddev->recovery_wait,
		   atomic_read(&bitmap->mddev->recovery_active) == 0);

	bitmap->mddev->curr_resync_completed = sector;
	set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
	/* only whole chunks can be marked done: round down */
	sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1);
	s = 0;
	while (s < sector && s < bitmap->mddev->resync_max_sectors) {
		bitmap_end_sync(bitmap, s, &blocks, 0);
		s += blocks;
	}
	bitmap->last_end_sync = jiffies;
	sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
}
EXPORT_SYMBOL(bitmap_cond_end_sync);
|
2005-06-22 08:17:14 +08:00
|
|
|
|
2005-09-10 07:23:44 +08:00
|
|
|
/*
 * bitmap_set_memory_bits - seed the in-memory counter for the chunk
 * containing @offset, creating it if necessary (the '1' passed to
 * bitmap_get_counter requests creation).
 *
 * Used when loading/dirtying the bitmap: an untouched counter is set to
 * the base value 2 and, if @needed, flagged NEEDED so resync picks the
 * chunk up; its page is marked pending for the daemon.
 */
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
{
	/* For each chunk covered by any of these sectors, set the
	 * counter to 1 and set resync_needed.  They should all
	 * be 0 at this point
	 */

	sector_t secs;
	bitmap_counter_t *bmc;
	spin_lock_irq(&bitmap->lock);
	bmc = bitmap_get_counter(bitmap, offset, &secs, 1);
	if (!bmc) {
		/* allocation of the counter page failed */
		spin_unlock_irq(&bitmap->lock);
		return;
	}
	if (!*bmc) {
		struct page *page;
		/* base value 2, optionally marked as needing resync */
		*bmc = 2 | (needed ? NEEDED_MASK : 0);
		bitmap_count_page(bitmap, offset, 1);
		page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap));
		set_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
		bitmap->allclean = 0;
	}
	spin_unlock_irq(&bitmap->lock);
}
|
|
|
|
|
2006-10-03 16:15:49 +08:00
|
|
|
/* dirty the memory and file bits for bitmap chunks "s" to "e" */
void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
{
	unsigned long chunk;

	for (chunk = s; chunk <= e; chunk++) {
		/* first sector covered by this chunk */
		sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap);
		/* mark in-memory counter dirty... */
		bitmap_set_memory_bits(bitmap, sec, 1);
		/* ...and the corresponding bit in the bitmap file */
		spin_lock_irq(&bitmap->lock);
		bitmap_file_set_bit(bitmap, sec);
		spin_unlock_irq(&bitmap->lock);
		if (sec < bitmap->mddev->recovery_cp)
			/* We are asserting that the array is dirty,
			 * so move the recovery_cp address back so
			 * that it is obvious that it is dirty
			 */
			bitmap->mddev->recovery_cp = sec;
	}
}
|
|
|
|
|
2005-08-05 03:53:35 +08:00
|
|
|
/*
|
|
|
|
* flush out any pending updates
|
|
|
|
*/
|
2011-10-11 13:47:53 +08:00
|
|
|
void bitmap_flush(struct mddev *mddev)
|
2005-08-05 03:53:35 +08:00
|
|
|
{
|
|
|
|
struct bitmap *bitmap = mddev->bitmap;
|
2009-12-14 09:49:53 +08:00
|
|
|
long sleep;
|
2005-08-05 03:53:35 +08:00
|
|
|
|
|
|
|
if (!bitmap) /* there was no bitmap */
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* run the daemon_work three time to ensure everything is flushed
|
|
|
|
* that can be
|
|
|
|
*/
|
2009-12-14 09:49:53 +08:00
|
|
|
sleep = mddev->bitmap_info.daemon_sleep * 2;
|
2009-12-14 09:49:53 +08:00
|
|
|
bitmap->daemon_lastrun -= sleep;
|
2009-12-14 09:49:46 +08:00
|
|
|
bitmap_daemon_work(mddev);
|
2009-12-14 09:49:53 +08:00
|
|
|
bitmap->daemon_lastrun -= sleep;
|
2009-12-14 09:49:46 +08:00
|
|
|
bitmap_daemon_work(mddev);
|
2009-12-14 09:49:53 +08:00
|
|
|
bitmap->daemon_lastrun -= sleep;
|
2009-12-14 09:49:46 +08:00
|
|
|
bitmap_daemon_work(mddev);
|
2005-08-05 03:53:35 +08:00
|
|
|
bitmap_update_sb(bitmap);
|
|
|
|
}
|
|
|
|
|
2005-06-22 08:17:14 +08:00
|
|
|
/*
|
|
|
|
* free memory that was allocated
|
|
|
|
*/
|
2005-09-10 07:23:50 +08:00
|
|
|
static void bitmap_free(struct bitmap *bitmap)
|
2005-06-22 08:17:14 +08:00
|
|
|
{
|
|
|
|
unsigned long k, pages;
|
|
|
|
struct bitmap_page *bp;
|
|
|
|
|
|
|
|
if (!bitmap) /* there was no bitmap */
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* release the bitmap file and kill the daemon */
|
|
|
|
bitmap_file_put(bitmap);
|
|
|
|
|
|
|
|
bp = bitmap->bp;
|
|
|
|
pages = bitmap->pages;
|
|
|
|
|
|
|
|
/* free all allocated memory */
|
|
|
|
|
|
|
|
if (bp) /* deallocate the page memory */
|
|
|
|
for (k = 0; k < pages; k++)
|
|
|
|
if (bp[k].map && !bp[k].hijacked)
|
|
|
|
kfree(bp[k].map);
|
|
|
|
kfree(bp);
|
|
|
|
kfree(bitmap);
|
|
|
|
}
|
2009-12-14 09:49:46 +08:00
|
|
|
|
2011-10-11 13:47:53 +08:00
|
|
|
/*
 * bitmap_destroy - detach and free an mddev's bitmap.
 *
 * Disconnects the bitmap from the mddev under bitmap_info.mutex first,
 * so concurrent users see mddev->bitmap == NULL before anything is
 * freed, then disables the daemon timeout and releases all resources.
 */
void bitmap_destroy(struct mddev *mddev)
{
	struct bitmap *bitmap = mddev->bitmap;

	if (!bitmap) /* there was no bitmap */
		return;

	mutex_lock(&mddev->bitmap_info.mutex);
	mddev->bitmap = NULL; /* disconnect from the md device */
	mutex_unlock(&mddev->bitmap_info.mutex);
	if (mddev->thread)
		/* no bitmap means the daemon has nothing periodic to do */
		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;

	if (bitmap->sysfs_can_clear)
		sysfs_put(bitmap->sysfs_can_clear);

	bitmap_free(bitmap);
}
|
2005-06-22 08:17:14 +08:00
|
|
|
|
|
|
|
/*
 * initialize the bitmap structure
 * if this returns an error, bitmap_destroy must be called to do clean up
 */
int bitmap_create(struct mddev *mddev)
{
	struct bitmap *bitmap;
	sector_t blocks = mddev->resync_max_sectors;
	unsigned long chunks;
	unsigned long pages;
	struct file *file = mddev->bitmap_info.file;
	int err;
	struct sysfs_dirent *bm = NULL;

	/* the on-disk superblock layout must stay exactly 256 bytes */
	BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);

	if (!file
	    && !mddev->bitmap_info.offset) /* bitmap disabled, nothing to do */
		return 0;

	/* a bitmap lives either in a file or at an offset, never both */
	BUG_ON(file && mddev->bitmap_info.offset);

	bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
	if (!bitmap)
		return -ENOMEM;

	spin_lock_init(&bitmap->lock);
	atomic_set(&bitmap->pending_writes, 0);
	init_waitqueue_head(&bitmap->write_wait);
	init_waitqueue_head(&bitmap->overflow_wait);
	init_waitqueue_head(&bitmap->behind_wait);

	bitmap->mddev = mddev;

	/* cache the sysfs "can_clear" dirent for later notifications */
	if (mddev->kobj.sd)
		bm = sysfs_get_dirent(mddev->kobj.sd, NULL, "bitmap");
	if (bm) {
		bitmap->sysfs_can_clear = sysfs_get_dirent(bm, NULL, "can_clear");
		sysfs_put(bm);
	} else
		bitmap->sysfs_can_clear = NULL;

	bitmap->file = file;
	if (file) {
		get_file(file);
		/* As future accesses to this file will use bmap,
		 * and bypass the page cache, we must sync the file
		 * first.
		 */
		vfs_fsync(file, 1);
	}
	/* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
	if (!mddev->bitmap_info.external) {
		/*
		 * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
		 * instructing us to create a new on-disk bitmap instance.
		 */
		if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
			err = bitmap_new_disk_sb(bitmap);
		else
			err = bitmap_read_sb(bitmap);
	} else {
		/* externally-managed metadata: caller must have configured
		 * the geometry via sysfs beforehand */
		err = 0;
		if (mddev->bitmap_info.chunksize == 0 ||
		    mddev->bitmap_info.daemon_sleep == 0)
			/* chunksize and time_base need to be
			 * set first. */
			err = -EINVAL;
	}
	if (err)
		goto error;

	bitmap->daemon_lastrun = jiffies;
	/* chunkshift = log2(chunksize); chunksize is a power of two */
	bitmap->chunkshift = ffz(~mddev->bitmap_info.chunksize);

	/* now that chunksize and chunkshift are set, we can use these macros */
	chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) >>
		CHUNK_BLOCK_SHIFT(bitmap);
	pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;

	BUG_ON(!pages);

	bitmap->chunks = chunks;
	bitmap->pages = pages;
	bitmap->missing_pages = pages;

	/* counter pages themselves are allocated lazily on first use */
	bitmap->bp = kzalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL);

	err = -ENOMEM;
	if (!bitmap->bp)
		goto error;

	printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
	       pages, bmname(bitmap));

	mddev->bitmap = bitmap;


	return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0;

 error:
	bitmap_free(bitmap);
	return err;
}
|
|
|
|
|
2011-10-11 13:47:53 +08:00
|
|
|
/*
 * bitmap_load - (re)populate the in-memory bitmap from disk and start
 * the daemon.  Called after bitmap_create once the array is running.
 */
int bitmap_load(struct mddev *mddev)
{
	int err = 0;
	sector_t start = 0;
	sector_t sector = 0;
	struct bitmap *bitmap = mddev->bitmap;

	if (!bitmap)
		goto out;

	/* Clear out old bitmap info first: Either there is none, or we
	 * are resuming after someone else has possibly changed things,
	 * so we should forget old cached info.
	 * All chunks should be clean, but some might need_sync.
	 */
	while (sector < mddev->resync_max_sectors) {
		sector_t blocks;
		bitmap_start_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
	bitmap_close_sync(bitmap);

	if (mddev->degraded == 0
	    || bitmap->events_cleared == mddev->events)
		/* no need to keep dirty bits to optimise a
		 * re-add of a missing device */
		start = mddev->recovery_cp;

	err = bitmap_init_from_disk(bitmap, start);

	if (err)
		goto out;

	/* arm the daemon with the configured wakeup period */
	mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
	md_wakeup_thread(mddev->thread);

	bitmap_update_sb(bitmap);

	if (bitmap->flags & BITMAP_WRITE_ERROR)
		err = -EIO;
out:
	return err;
}
EXPORT_SYMBOL_GPL(bitmap_load);
|
2005-06-22 08:17:14 +08:00
|
|
|
|
2009-12-14 09:49:55 +08:00
|
|
|
static ssize_t
|
2011-10-11 13:47:53 +08:00
|
|
|
location_show(struct mddev *mddev, char *page)
|
2009-12-14 09:49:55 +08:00
|
|
|
{
|
|
|
|
ssize_t len;
|
2010-06-01 17:37:31 +08:00
|
|
|
if (mddev->bitmap_info.file)
|
2009-12-14 09:49:55 +08:00
|
|
|
len = sprintf(page, "file");
|
2010-06-01 17:37:31 +08:00
|
|
|
else if (mddev->bitmap_info.offset)
|
2009-12-14 09:49:55 +08:00
|
|
|
len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
|
2010-06-01 17:37:31 +08:00
|
|
|
else
|
2009-12-14 09:49:55 +08:00
|
|
|
len = sprintf(page, "none");
|
|
|
|
len += sprintf(page+len, "\n");
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * sysfs 'location' writer.
 *
 * "none" removes an existing bitmap (array quiesced around the
 * destroy); a signed decimal offset creates an internal bitmap at that
 * offset relative to the metadata.  "file:" is rejected.  Changing the
 * location while a bitmap exists, or while resync/recovery is running,
 * returns -EBUSY.
 */
static ssize_t
location_store(struct mddev *mddev, const char *buf, size_t len)
{

	if (mddev->pers) {
		/* need quiesce support to reconfigure a live array */
		if (!mddev->pers->quiesce)
			return -EBUSY;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
	}

	if (mddev->bitmap || mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset) {
		/* bitmap already configured.  Only option is to clear it */
		if (strncmp(buf, "none", 4) != 0)
			return -EBUSY;
		if (mddev->pers) {
			mddev->pers->quiesce(mddev, 1);
			bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
		}
		mddev->bitmap_info.offset = 0;
		if (mddev->bitmap_info.file) {
			struct file *f = mddev->bitmap_info.file;
			mddev->bitmap_info.file = NULL;
			restore_bitmap_write_access(f);
			fput(f);
		}
	} else {
		/* No bitmap, OK to set a location */
		long long offset;
		if (strncmp(buf, "none", 4) == 0)
			/* nothing to be done */;
		else if (strncmp(buf, "file:", 5) == 0) {
			/* Not supported yet */
			return -EINVAL;
		} else {
			int rv;
			if (buf[0] == '+')
				rv = strict_strtoll(buf+1, 10, &offset);
			else
				rv = strict_strtoll(buf, 10, &offset);
			if (rv)
				return rv;
			if (offset == 0)
				return -EINVAL;
			/* v0.90 metadata has a fixed bitmap location */
			if (mddev->bitmap_info.external == 0 &&
			    mddev->major_version == 0 &&
			    offset != mddev->bitmap_info.default_offset)
				return -EINVAL;
			mddev->bitmap_info.offset = offset;
			if (mddev->pers) {
				/* create under quiesce; undo offset on failure */
				mddev->pers->quiesce(mddev, 1);
				rv = bitmap_create(mddev);
				if (rv) {
					bitmap_destroy(mddev);
					mddev->bitmap_info.offset = 0;
				}
				mddev->pers->quiesce(mddev, 0);
				if (rv)
					return rv;
			}
		}
	}
	if (!mddev->external) {
		/* Ensure new bitmap info is stored in
		 * metadata promptly.
		 */
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
	}
	return len;
}

static struct md_sysfs_entry bitmap_location =
__ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);
|
|
|
|
|
|
|
|
static ssize_t
|
2011-10-11 13:47:53 +08:00
|
|
|
timeout_show(struct mddev *mddev, char *page)
|
2009-12-14 09:49:55 +08:00
|
|
|
{
|
|
|
|
ssize_t len;
|
|
|
|
unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
|
|
|
|
unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;
|
2010-06-01 17:37:31 +08:00
|
|
|
|
2009-12-14 09:49:55 +08:00
|
|
|
len = sprintf(page, "%lu", secs);
|
|
|
|
if (jifs)
|
|
|
|
len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
|
|
|
|
len += sprintf(page+len, "\n");
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * sysfs 'time_base' writer: sets the daemon sleep interval.
 *
 * Input is parsed with 4 decimal places (so the raw value is in units
 * of 1/10000 s) and converted to jiffies, clamped to
 * [1, MAX_SCHEDULE_TIMEOUT).  May be changed at any time.
 */
static ssize_t
timeout_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* timeout can be set at any time */
	unsigned long timeout;
	int rv = strict_strtoul_scaled(buf, &timeout, 4);
	if (rv)
		return rv;

	/* just to make sure we don't overflow... */
	if (timeout >= LONG_MAX / HZ)
		return -EINVAL;

	/* scaled value is in 1/10000 s; convert to jiffies */
	timeout = timeout * HZ / 10000;

	if (timeout >= MAX_SCHEDULE_TIMEOUT)
		timeout = MAX_SCHEDULE_TIMEOUT-1;
	if (timeout < 1)
		timeout = 1;
	mddev->bitmap_info.daemon_sleep = timeout;
	if (mddev->thread) {
		/* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
		 * the bitmap is all clean and we don't need to
		 * adjust the timeout right now
		 */
		if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
			mddev->thread->timeout = timeout;
			md_wakeup_thread(mddev->thread);
		}
	}
	return len;
}

static struct md_sysfs_entry bitmap_timeout =
__ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);
|
|
|
|
|
|
|
|
static ssize_t
|
2011-10-11 13:47:53 +08:00
|
|
|
backlog_show(struct mddev *mddev, char *page)
|
2009-12-14 09:49:55 +08:00
|
|
|
{
|
|
|
|
return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
2011-10-11 13:47:53 +08:00
|
|
|
backlog_store(struct mddev *mddev, const char *buf, size_t len)
|
2009-12-14 09:49:55 +08:00
|
|
|
{
|
|
|
|
unsigned long backlog;
|
|
|
|
int rv = strict_strtoul(buf, 10, &backlog);
|
|
|
|
if (rv)
|
|
|
|
return rv;
|
|
|
|
if (backlog > COUNTER_MAX)
|
|
|
|
return -EINVAL;
|
|
|
|
mddev->bitmap_info.max_write_behind = backlog;
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct md_sysfs_entry bitmap_backlog =
|
|
|
|
__ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);
|
|
|
|
|
|
|
|
static ssize_t
|
2011-10-11 13:47:53 +08:00
|
|
|
chunksize_show(struct mddev *mddev, char *page)
|
2009-12-14 09:49:55 +08:00
|
|
|
{
|
|
|
|
return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
2011-10-11 13:47:53 +08:00
|
|
|
chunksize_store(struct mddev *mddev, const char *buf, size_t len)
|
2009-12-14 09:49:55 +08:00
|
|
|
{
|
|
|
|
/* Can only be changed when no bitmap is active */
|
|
|
|
int rv;
|
|
|
|
unsigned long csize;
|
|
|
|
if (mddev->bitmap)
|
|
|
|
return -EBUSY;
|
|
|
|
rv = strict_strtoul(buf, 10, &csize);
|
|
|
|
if (rv)
|
|
|
|
return rv;
|
|
|
|
if (csize < 512 ||
|
|
|
|
!is_power_of_2(csize))
|
|
|
|
return -EINVAL;
|
|
|
|
mddev->bitmap_info.chunksize = csize;
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct md_sysfs_entry bitmap_chunksize =
|
|
|
|
__ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);
|
|
|
|
|
2011-10-11 13:47:53 +08:00
|
|
|
static ssize_t metadata_show(struct mddev *mddev, char *page)
|
2009-12-14 09:49:56 +08:00
|
|
|
{
|
|
|
|
return sprintf(page, "%s\n", (mddev->bitmap_info.external
|
|
|
|
? "external" : "internal"));
|
|
|
|
}
|
|
|
|
|
2011-10-11 13:47:53 +08:00
|
|
|
static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
|
2009-12-14 09:49:56 +08:00
|
|
|
{
|
|
|
|
if (mddev->bitmap ||
|
|
|
|
mddev->bitmap_info.file ||
|
|
|
|
mddev->bitmap_info.offset)
|
|
|
|
return -EBUSY;
|
|
|
|
if (strncmp(buf, "external", 8) == 0)
|
|
|
|
mddev->bitmap_info.external = 1;
|
|
|
|
else if (strncmp(buf, "internal", 8) == 0)
|
|
|
|
mddev->bitmap_info.external = 0;
|
|
|
|
else
|
|
|
|
return -EINVAL;
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct md_sysfs_entry bitmap_metadata =
|
|
|
|
__ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
|
|
|
|
|
2011-10-11 13:47:53 +08:00
|
|
|
static ssize_t can_clear_show(struct mddev *mddev, char *page)
|
2009-12-14 09:49:56 +08:00
|
|
|
{
|
|
|
|
int len;
|
|
|
|
if (mddev->bitmap)
|
|
|
|
len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
|
|
|
|
"false" : "true"));
|
|
|
|
else
|
|
|
|
len = sprintf(page, "\n");
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
2011-10-11 13:47:53 +08:00
|
|
|
static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
|
2009-12-14 09:49:56 +08:00
|
|
|
{
|
|
|
|
if (mddev->bitmap == NULL)
|
|
|
|
return -ENOENT;
|
|
|
|
if (strncmp(buf, "false", 5) == 0)
|
|
|
|
mddev->bitmap->need_sync = 1;
|
|
|
|
else if (strncmp(buf, "true", 4) == 0) {
|
|
|
|
if (mddev->degraded)
|
|
|
|
return -EBUSY;
|
|
|
|
mddev->bitmap->need_sync = 0;
|
|
|
|
} else
|
|
|
|
return -EINVAL;
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct md_sysfs_entry bitmap_can_clear =
|
|
|
|
__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
|
|
|
|
|
2010-03-08 13:02:37 +08:00
|
|
|
static ssize_t
|
2011-10-11 13:47:53 +08:00
|
|
|
behind_writes_used_show(struct mddev *mddev, char *page)
|
2010-03-08 13:02:37 +08:00
|
|
|
{
|
|
|
|
if (mddev->bitmap == NULL)
|
|
|
|
return sprintf(page, "0\n");
|
|
|
|
return sprintf(page, "%lu\n",
|
|
|
|
mddev->bitmap->behind_writes_used);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t
|
2011-10-11 13:47:53 +08:00
|
|
|
behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
|
2010-03-08 13:02:37 +08:00
|
|
|
{
|
|
|
|
if (mddev->bitmap)
|
|
|
|
mddev->bitmap->behind_writes_used = 0;
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct md_sysfs_entry max_backlog_used =
|
|
|
|
__ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
|
|
|
|
behind_writes_used_show, behind_writes_used_reset);
|
|
|
|
|
2009-12-14 09:49:55 +08:00
|
|
|
/* attributes exposed in the md device's "bitmap" sysfs directory */
static struct attribute *md_bitmap_attrs[] = {
	&bitmap_location.attr,
	&bitmap_timeout.attr,
	&bitmap_backlog.attr,
	&bitmap_chunksize.attr,
	&bitmap_metadata.attr,
	&bitmap_can_clear.attr,
	&max_backlog_used.attr,
	NULL
};
/* registered by md core to create the "bitmap" sysfs subdirectory */
struct attribute_group md_bitmap_group = {
	.name = "bitmap",
	.attrs = md_bitmap_attrs,
};
|
|
|
|
|