Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm
* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm:
  dm snapshot: allow chunk size to be less than page size
  dm snapshot: use unsigned integer chunk size
  dm snapshot: lock snapshot while supplying status
  dm exception store: fix failed set_chunk_size error path
  dm snapshot: require non zero chunk size by end of ctr
  dm: dec_pending needs locking to save error value
  dm: add missing del_gendisk to alloc_dev error path
  dm log: userspace fix incorrect luid cast in userspace_ctr
  dm snapshot: free exception store on init failure
  dm snapshot: sort by chunk size to fix race
commit 1b7607030d
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -138,16 +138,6 @@ int dm_exception_store_type_unregister(struct dm_exception_store_type *type)
 }
 EXPORT_SYMBOL(dm_exception_store_type_unregister);
 
-/*
- * Round a number up to the nearest 'size' boundary.  size must
- * be a power of 2.
- */
-static ulong round_up(ulong n, ulong size)
-{
-	size--;
-	return (n + size) & ~size;
-}
-
 static int set_chunk_size(struct dm_exception_store *store,
 			  const char *chunk_size_arg, char **error)
 {
@@ -155,7 +145,8 @@ static int set_chunk_size(struct dm_exception_store *store,
 	char *value;
 
 	chunk_size_ulong = simple_strtoul(chunk_size_arg, &value, 10);
-	if (*chunk_size_arg == '\0' || *value != '\0') {
+	if (*chunk_size_arg == '\0' || *value != '\0' ||
+	    chunk_size_ulong > UINT_MAX) {
 		*error = "Invalid chunk size";
 		return -EINVAL;
 	}
@@ -165,40 +156,35 @@ static int set_chunk_size(struct dm_exception_store *store,
 		return 0;
 	}
 
-	/*
-	 * Chunk size must be multiple of page size.  Silently
-	 * round up if it's not.
-	 */
-	chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9);
-
-	return dm_exception_store_set_chunk_size(store, chunk_size_ulong,
+	return dm_exception_store_set_chunk_size(store,
+						 (unsigned) chunk_size_ulong,
 						 error);
 }
 
 int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
-				      unsigned long chunk_size_ulong,
+				      unsigned chunk_size,
 				      char **error)
 {
 	/* Check chunk_size is a power of 2 */
-	if (!is_power_of_2(chunk_size_ulong)) {
+	if (!is_power_of_2(chunk_size)) {
 		*error = "Chunk size is not a power of 2";
 		return -EINVAL;
 	}
 
 	/* Validate the chunk size against the device block size */
-	if (chunk_size_ulong % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
+	if (chunk_size % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
 		*error = "Chunk size is not a multiple of device blocksize";
 		return -EINVAL;
 	}
 
-	if (chunk_size_ulong > INT_MAX >> SECTOR_SHIFT) {
+	if (chunk_size > INT_MAX >> SECTOR_SHIFT) {
 		*error = "Chunk size is too high";
 		return -EINVAL;
 	}
 
-	store->chunk_size = chunk_size_ulong;
-	store->chunk_mask = chunk_size_ulong - 1;
-	store->chunk_shift = ffs(chunk_size_ulong) - 1;
+	store->chunk_size = chunk_size;
+	store->chunk_mask = chunk_size - 1;
+	store->chunk_shift = ffs(chunk_size) - 1;
 
 	return 0;
 }
@@ -251,7 +237,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
 
 	r = set_chunk_size(tmp_store, argv[2], &ti->error);
 	if (r)
-		goto bad_cow;
+		goto bad_ctr;
 
 	r = type->ctr(tmp_store, 0, NULL);
 	if (r) {
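Note on the chunk-size handling above: set_chunk_size() still parses the table argument as an unsigned long, but now rejects anything that will not fit in an unsigned int before handing the value to dm_exception_store_set_chunk_size(), which checks power-of-two, block-size alignment and an upper bound, and no longer silently rounds up to a page. The following is a minimal userspace sketch of that validation order, not the kernel code; parse_chunk_size() and SECTORS_PER_LOGICAL_BLOCK are made-up names standing in for the dm helpers and bdev_logical_block_size().

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define SECTOR_SHIFT 9
/* Hypothetical stand-in for bdev_logical_block_size(bdev) >> 9. */
#define SECTORS_PER_LOGICAL_BLOCK 1

/*
 * Parse a chunk size given in 512-byte sectors and validate it in the same
 * order as the patched code: fits in an unsigned int, power of 2, multiple of
 * the device block size, and small enough that the byte count fits in an int.
 * A value of 0 means "not set / use the default" and is passed through, as in
 * set_chunk_size().
 */
static int parse_chunk_size(const char *arg, unsigned *out, const char **error)
{
	char *end;
	unsigned long val = strtoul(arg, &end, 10);

	if (*arg == '\0' || *end != '\0' || val > UINT_MAX) {
		*error = "Invalid chunk size";
		return -EINVAL;
	}

	if (val == 0) {
		*out = 0;
		return 0;
	}

	if (val & (val - 1)) {
		*error = "Chunk size is not a power of 2";
		return -EINVAL;
	}

	if (val % SECTORS_PER_LOGICAL_BLOCK) {
		*error = "Chunk size is not a multiple of device blocksize";
		return -EINVAL;
	}

	if (val > INT_MAX >> SECTOR_SHIFT) {
		*error = "Chunk size is too high";
		return -EINVAL;
	}

	*out = (unsigned)val;
	return 0;
}

int main(void)
{
	const char *err = NULL;
	unsigned chunk;

	if (parse_chunk_size("16", &chunk, &err) == 0)
		printf("accepted chunk size: %u sectors\n", chunk);

	if (parse_chunk_size("24", &chunk, &err) != 0)
		printf("rejected: %s\n", err);	/* not a power of 2 */

	return 0;
}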
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -101,9 +101,9 @@ struct dm_exception_store {
 	struct dm_dev *cow;
 
 	/* Size of data blocks saved - must be a power of 2 */
-	chunk_t chunk_size;
-	chunk_t chunk_mask;
-	chunk_t chunk_shift;
+	unsigned chunk_size;
+	unsigned chunk_mask;
+	unsigned chunk_shift;
 
 	void *context;
 };
@@ -169,7 +169,7 @@ int dm_exception_store_type_register(struct dm_exception_store_type *type);
 int dm_exception_store_type_unregister(struct dm_exception_store_type *type);
 
 int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
-				      unsigned long chunk_size_ulong,
+				      unsigned chunk_size,
 				      char **error);
 
 int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -156,7 +156,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
 	}
 
 	/* The ptr value is sufficient for local unique id */
-	lc->luid = (uint64_t)lc;
+	lc->luid = (unsigned long)lc;
 
 	lc->ti = ti;
 
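The luid assignment above only needs a value that is unique within the running kernel, as the retained comment says; casting the pointer through unsigned long keeps the conversion at exactly pointer width on both 32-bit and 64-bit builds before it widens into the 64-bit field. A tiny standalone illustration (the struct name here is hypothetical, not the dm-log code):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the userspace-log context structure. */
struct log_ctx {
	uint64_t luid;
};

int main(void)
{
	struct log_ctx lc;

	/*
	 * unsigned long has the same width as a pointer on both 32-bit and
	 * 64-bit Linux, so the pointer value converts cleanly and then widens
	 * into the 64-bit id field.  Uniqueness within one running kernel is
	 * all that is required of luid.
	 */
	lc.luid = (unsigned long)&lc;

	printf("luid = %llu\n", (unsigned long long)lc.luid);
	return 0;
}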
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -284,12 +284,13 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 {
 	int r;
 	struct disk_header *dh;
-	chunk_t chunk_size;
+	unsigned chunk_size;
 	int chunk_size_supplied = 1;
 	char *chunk_err;
 
 	/*
-	 * Use default chunk size (or hardsect_size, if larger) if none supplied
+	 * Use default chunk size (or logical_block_size, if larger)
+	 * if none supplied
 	 */
 	if (!ps->store->chunk_size) {
 		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
@@ -334,10 +335,9 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 		return 0;
 
 	if (chunk_size_supplied)
-		DMWARN("chunk size %llu in device metadata overrides "
-		       "table chunk size of %llu.",
-		       (unsigned long long)chunk_size,
-		       (unsigned long long)ps->store->chunk_size);
+		DMWARN("chunk size %u in device metadata overrides "
+		       "table chunk size of %u.",
+		       chunk_size, ps->store->chunk_size);
 
 	/* We had a bogus chunk_size. Fix stuff up. */
 	free_area(ps);
@@ -345,8 +345,8 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
 					      &chunk_err);
 	if (r) {
-		DMERR("invalid on-disk chunk size %llu: %s.",
-		      (unsigned long long)chunk_size, chunk_err);
+		DMERR("invalid on-disk chunk size %u: %s.",
+		      chunk_size, chunk_err);
 		return r;
 	}
 
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -296,6 +296,7 @@ static void __insert_origin(struct origin *o)
  */
 static int register_snapshot(struct dm_snapshot *snap)
 {
+	struct dm_snapshot *l;
 	struct origin *o, *new_o;
 	struct block_device *bdev = snap->origin->bdev;
 
@@ -319,7 +320,11 @@ static int register_snapshot(struct dm_snapshot *snap)
 		__insert_origin(o);
 	}
 
-	list_add_tail(&snap->list, &o->snapshots);
+	/* Sort the list according to chunk size, largest-first smallest-last */
+	list_for_each_entry(l, &o->snapshots, list)
+		if (l->store->chunk_size < snap->store->chunk_size)
+			break;
+	list_add_tail(&snap->list, &l->list);
 
 	up_write(&_origins_lock);
 	return 0;
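register_snapshot() now keeps each origin's snapshot list ordered by chunk size, largest first, which is the "sort by chunk size to fix race" change from the merge summary: the walk stops at the first entry with a smaller chunk size and the new snapshot is linked in front of it, or at the tail if none is smaller. Below is a self-contained sketch of the same idiom, using minimal stand-ins for the <linux/list.h> primitives rather than the kernel headers; it is illustrative only.

#include <stddef.h>
#include <stdio.h>

/* Minimal userspace stand-ins for the kernel's list primitives, just enough
 * to demonstrate the sorted insert used by register_snapshot(). */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h->prev = h;
}

/* Insert 'new' immediately before 'head', i.e. at the tail of the list that
 * 'head' anchors - same behaviour as the kernel's list_add_tail(). */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct snapshot {
	unsigned chunk_size;
	struct list_head list;
};

/* Keep the list sorted by chunk size, largest first: walk until the first
 * entry with a smaller chunk size and insert in front of it.  If every entry
 * is at least as large, the walk ends back at the head and the new snapshot
 * lands at the tail. */
static void register_sorted(struct list_head *snapshots, struct snapshot *snap)
{
	struct list_head *pos = snapshots->next;

	while (pos != snapshots &&
	       container_of(pos, struct snapshot, list)->chunk_size >=
	       snap->chunk_size)
		pos = pos->next;

	list_add_tail(&snap->list, pos);
}

int main(void)
{
	struct list_head snapshots;
	struct snapshot a = { .chunk_size = 16 };
	struct snapshot b = { .chunk_size = 64 };
	struct snapshot c = { .chunk_size = 32 };
	struct list_head *pos;

	INIT_LIST_HEAD(&snapshots);
	register_sorted(&snapshots, &a);
	register_sorted(&snapshots, &b);
	register_sorted(&snapshots, &c);

	for (pos = snapshots.next; pos != &snapshots; pos = pos->next)
		printf("%u ", container_of(pos, struct snapshot, list)->chunk_size);
	printf("\n");	/* prints: 64 32 16 */
	return 0;
}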
@@ -668,6 +673,11 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	bio_list_init(&s->queued_bios);
 	INIT_WORK(&s->queued_bios_work, flush_queued_bios);
 
+	if (!s->store->chunk_size) {
+		ti->error = "Chunk size not set";
+		goto bad_load_and_register;
+	}
+
 	/* Add snapshot to the list of snapshots for this origin */
 	/* Exceptions aren't triggered till snapshot_resume() is called */
 	if (register_snapshot(s)) {
@@ -951,7 +961,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
 
 	src.bdev = bdev;
 	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
-	src.count = min(s->store->chunk_size, dev_size - src.sector);
+	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
 
 	dest.bdev = s->store->cow->bdev;
 	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
@@ -1142,6 +1152,8 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
 	unsigned sz = 0;
 	struct dm_snapshot *snap = ti->private;
 
+	down_write(&snap->lock);
+
 	switch (type) {
 	case STATUSTYPE_INFO:
 		if (!snap->valid)
@@ -1173,6 +1185,8 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
 		break;
 	}
 
+	up_write(&snap->lock);
+
 	return 0;
 }
 
@@ -1388,7 +1402,7 @@ static void origin_resume(struct dm_target *ti)
 	struct dm_dev *dev = ti->private;
 	struct dm_snapshot *snap;
 	struct origin *o;
-	chunk_t chunk_size = 0;
+	unsigned chunk_size = 0;
 
 	down_read(&_origins_lock);
 	o = __lookup_origin(dev->bdev);
@@ -1465,7 +1479,7 @@ static int __init dm_snapshot_init(void)
 	r = dm_register_target(&snapshot_target);
 	if (r) {
 		DMERR("snapshot target register failed %d", r);
-		return r;
+		goto bad_register_snapshot_target;
 	}
 
 	r = dm_register_target(&origin_target);
@@ -1522,6 +1536,9 @@ bad2:
 	dm_unregister_target(&origin_target);
 bad1:
 	dm_unregister_target(&snapshot_target);
+
+bad_register_snapshot_target:
+	dm_exception_store_exit();
 	return r;
 }
 
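The dm_snapshot_init() hunks above convert an early return into a jump to a new bad_register_snapshot_target label, so that dm_exception_store_exit() also runs when registering the snapshot target fails. Here is a generic sketch of that goto-unwind pattern with hypothetical setup/teardown names; it is not the dm code itself.

#include <stdio.h>

/*
 * Hypothetical setup/teardown pairs standing in for dm_exception_store_init()
 * / dm_exception_store_exit() and dm_register_target().
 */
static int exception_store_setup(void)    { puts("exception store init"); return 0; }
static void exception_store_teardown(void){ puts("exception store exit"); }
static int target_register(void)          { puts("target register"); return -1; /* simulate failure */ }

/*
 * Kernel-style init with goto unwinding: every failure jumps to a label that
 * undoes the steps completed so far, in reverse order.  Returning early
 * instead (as the old snapshot init did) would skip the exception store
 * cleanup - the leak the new label closes.
 */
static int snapshot_init_sketch(void)
{
	int r;

	r = exception_store_setup();
	if (r)
		return r;

	r = target_register();
	if (r) {
		fprintf(stderr, "target register failed %d\n", r);
		goto bad_register_target;
	}

	return 0;

bad_register_target:
	exception_store_teardown();
	return r;
}

int main(void)
{
	printf("init returned %d\n", snapshot_init_sketch());
	return 0;
}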
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -47,6 +47,7 @@ struct dm_io {
 	atomic_t io_count;
 	struct bio *bio;
 	unsigned long start_time;
+	spinlock_t endio_lock;
 };
 
 /*
@@ -578,8 +579,12 @@ static void dec_pending(struct dm_io *io, int error)
 	struct mapped_device *md = io->md;
 
 	/* Push-back supersedes any I/O errors */
-	if (error && !(io->error > 0 && __noflush_suspending(md)))
-		io->error = error;
+	if (unlikely(error)) {
+		spin_lock_irqsave(&io->endio_lock, flags);
+		if (!(io->error > 0 && __noflush_suspending(md)))
+			io->error = error;
+		spin_unlock_irqrestore(&io->endio_lock, flags);
+	}
 
 	if (atomic_dec_and_test(&io->io_count)) {
 		if (io->error == DM_ENDIO_REQUEUE) {
@@ -1226,6 +1231,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 	atomic_set(&ci.io->io_count, 1);
 	ci.io->bio = bio;
 	ci.io->md = md;
+	spin_lock_init(&ci.io->endio_lock);
 	ci.sector = bio->bi_sector;
 	ci.sector_count = bio_sectors(bio);
 	if (unlikely(bio_empty_barrier(bio)))
@@ -1822,6 +1828,7 @@ static struct mapped_device *alloc_dev(int minor)
 bad_bdev:
 	destroy_workqueue(md->wq);
 bad_thread:
+	del_gendisk(md->disk);
 	put_disk(md->disk);
 bad_disk:
 	blk_cleanup_queue(md->queue);
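The dm.c changes give each struct dm_io a spinlock so the check-and-store of io->error in dec_pending() cannot race between concurrent completions; the irqsave variant is used because completion may run in interrupt context. Below is a rough userspace analogy of the "lock only on the error path" shape using a pthread mutex and a C11 atomic counter; it simplifies the error rule (first error wins) and is not the kernel code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Userspace stand-in for struct dm_io: several sub-bio completions may call
 * dec_pending_sketch() concurrently for the same object. */
struct io {
	int error;			/* shared: guarded by endio_lock */
	atomic_int io_count;		/* like atomic_t io_count */
	pthread_mutex_t endio_lock;	/* plays the role of the new spinlock_t endio_lock */
};

static void dec_pending_sketch(struct io *io, int error)
{
	/* Only the error path takes the lock, as in the patched dec_pending();
	 * the check-and-store on io->error is what must not race.  (The real
	 * code also lets a push-back value supersede other errors during a
	 * no-flush suspend.) */
	if (error) {
		pthread_mutex_lock(&io->endio_lock);
		if (io->error == 0)
			io->error = error;
		pthread_mutex_unlock(&io->endio_lock);
	}

	/* The last completion reports the final status, mirroring
	 * atomic_dec_and_test(&io->io_count). */
	if (atomic_fetch_sub(&io->io_count, 1) == 1)
		printf("io complete, error=%d\n", io->error);
}

int main(void)
{
	struct io io = { .error = 0, .io_count = 2,
			 .endio_lock = PTHREAD_MUTEX_INITIALIZER };

	dec_pending_sketch(&io, 0);	/* first fragment succeeds */
	dec_pending_sketch(&io, -5);	/* second fragment fails with -EIO */
	return 0;
}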