Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm
* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm: (24 commits)
  dm crypt: add documentation
  dm: remove md argument from specific_minor
  dm table: remove unused dm_create_error_table
  dm table: drop void suspend_targets return
  dm: unplug queues in threads
  dm raid1: use timer
  dm: move include files
  dm kcopyd: rename
  dm: expose macros
  dm kcopyd: remove redundant client counting
  dm kcopyd: private mempool
  dm kcopyd: per device
  dm log: make module use tracking internal
  dm log: move register functions
  dm log: clean interface
  dm kcopyd: clean interface
  dm io: clean interface
  dm io: rename error to error_bits
  dm snapshot: store pointer to target instance
  dm log: move dirty region log code into separate module
  ...
commit 6f97b220f4

@@ -0,0 +1,52 @@
+dm-crypt
+=========
+
+Device-Mapper's "crypt" target provides transparent encryption of block devices
+using the kernel crypto API.
+
+Parameters: <cipher> <key> <iv_offset> <device path> <offset>
+
+<cipher>
+    Encryption cipher and an optional IV generation mode.
+    (In format cipher-chainmode-ivopts:ivmode).
+    Examples:
+       des
+       aes-cbc-essiv:sha256
+       twofish-ecb
+
+    /proc/crypto contains supported crypto modes
+
+<key>
+    Key used for encryption. It is encoded as a hexadecimal number.
+    You can only use key sizes that are valid for the selected cipher.
+
+<iv_offset>
+    The IV offset is a sector count that is added to the sector number
+    before creating the IV.
+
+<device path>
+    This is the device that is going to be used as backend and contains the
+    encrypted data.  You can specify it as a path like /dev/xxx or a device
+    number <major>:<minor>.
+
+<offset>
+    Starting sector within the device where the encrypted data begins.
+
+Example scripts
+===============
+LUKS (Linux Unified Key Setup) is now the preferred way to set up disk
+encryption with dm-crypt using the 'cryptsetup' utility, see
+http://luks.endorphin.org/
+
+[[
+#!/bin/sh
+# Create a crypt device using dmsetup
+dmsetup create crypt1 --table "0 `blockdev --getsize $1` crypt aes-cbc-essiv:sha256 babebabebabebabebabebabebabebabe 0 $1 0"
+]]
+
+[[
+#!/bin/sh
+# Create a crypt device using cryptsetup and LUKS header with default cipher
+cryptsetup luksFormat $1
+cryptsetup luksOpen $1 crypt1
+]]
@@ -3,10 +3,10 @@
 #
 
 dm-mod-objs := dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
-		   dm-ioctl.o dm-io.o kcopyd.o
+		   dm-ioctl.o dm-io.o dm-kcopyd.o
 dm-multipath-objs := dm-hw-handler.o dm-path-selector.o dm-mpath.o
 dm-snapshot-objs := dm-snap.o dm-exception-store.o
-dm-mirror-objs := dm-log.o dm-raid1.o
+dm-mirror-objs := dm-raid1.o
 dm-rdac-objs := dm-mpath-rdac.o
 dm-hp-sw-objs := dm-mpath-hp-sw.o
 md-mod-objs := md.o bitmap.o
@@ -39,7 +39,7 @@ obj-$(CONFIG_DM_MULTIPATH_EMC) += dm-emc.o
 obj-$(CONFIG_DM_MULTIPATH_HP) += dm-hp-sw.o
 obj-$(CONFIG_DM_MULTIPATH_RDAC) += dm-rdac.o
 obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
-obj-$(CONFIG_DM_MIRROR) += dm-mirror.o
+obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o
 obj-$(CONFIG_DM_ZERO) += dm-zero.o
 
 quiet_cmd_unroll = UNROLL $@
@@ -9,13 +9,13 @@
 
 #include "dm.h"
 #include "dm-snap.h"
-#include "dm-io.h"
-#include "kcopyd.h"
 
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
+#include <linux/dm-io.h>
+#include <linux/dm-kcopyd.h>
 
 #define DM_MSG_PREFIX "snapshots"
 #define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */
@@ -131,7 +131,7 @@ struct pstore {
 
 static unsigned sectors_to_pages(unsigned sectors)
 {
-	return sectors / (PAGE_SIZE >> 9);
+	return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
 }
 
 static int alloc_area(struct pstore *ps)
@@ -159,7 +159,7 @@ static void free_area(struct pstore *ps)
 }
 
 struct mdata_req {
-	struct io_region *where;
+	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;
@@ -177,7 +177,7 @@ static void do_metadata(struct work_struct *work)
  */
 static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
 {
-	struct io_region where = {
+	struct dm_io_region where = {
		.bdev = ps->snap->cow->bdev,
		.sector = ps->snap->chunk_size * chunk,
		.count = ps->snap->chunk_size,
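
The sectors_to_pages() change above replaces truncating division with round-up division: with 4 KB pages there are eight 512-byte sectors per page, so any sector count that is not a multiple of eight used to under-allocate. A minimal standalone sketch of the difference (DIV_ROUND_UP matches the kernel macro; the fixed page size is an assumption for illustration):

[[
#include <stdio.h>

/* Standalone stand-in for the kernel's DIV_ROUND_UP() macro. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define PAGE_SIZE 4096u			/* assumption: 4 KB pages */

int main(void)
{
	unsigned sectors = 9;			/* nine 512-byte sectors */
	unsigned per_page = PAGE_SIZE >> 9;	/* 8 sectors per page */

	/* Old behaviour: 9 / 8 == 1 page, silently dropping the 9th sector. */
	printf("truncated:  %u\n", sectors / per_page);

	/* New behaviour: DIV_ROUND_UP(9, 8) == 2 pages, covering all sectors. */
	printf("rounded up: %u\n", DIV_ROUND_UP(sectors, per_page));
	return 0;
}
]]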
@@ -5,13 +5,14 @@
  * This file is released under the GPL.
  */
 
-#include "dm-io.h"
+#include "dm.h"
 
 #include <linux/bio.h>
 #include <linux/mempool.h>
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/dm-io.h>
 
 struct dm_io_client {
	mempool_t *pool;
@@ -20,7 +21,7 @@ struct dm_io_client {
 
 /* FIXME: can we shrink this ? */
 struct io {
-	unsigned long error;
+	unsigned long error_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
@@ -107,14 +108,14 @@ static inline unsigned bio_get_region(struct bio *bio)
 static void dec_count(struct io *io, unsigned int region, int error)
 {
	if (error)
-		set_bit(region, &io->error);
+		set_bit(region, &io->error_bits);
 
	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);
 
		else {
-			unsigned long r = io->error;
+			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;
 
@@ -271,7 +272,7 @@ static void km_dp_init(struct dpages *dp, void *data)
 /*-----------------------------------------------------------------
  * IO routines that accept a list of pages.
  *---------------------------------------------------------------*/
-static void do_region(int rw, unsigned int region, struct io_region *where,
+static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
 {
	struct bio *bio;
@@ -320,7 +321,7 @@ static void do_region(int rw, unsigned int region, struct io_region *where,
 }
 
 static void dispatch_io(int rw, unsigned int num_regions,
-			struct io_region *where, struct dpages *dp,
+			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
 {
	int i;
@@ -347,17 +348,17 @@ static void dispatch_io(int rw, unsigned int num_regions,
 }
 
 static int sync_io(struct dm_io_client *client, unsigned int num_regions,
-		   struct io_region *where, int rw, struct dpages *dp,
+		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
 {
	struct io io;
 
-	if (num_regions > 1 && rw != WRITE) {
+	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}
 
-	io.error = 0;
+	io.error_bits = 0;
	atomic_set(&io.count, 1); /* see dispatch_io() */
	io.sleeper = current;
	io.client = client;
@@ -378,25 +379,25 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		return -EINTR;
 
	if (error_bits)
-		*error_bits = io.error;
+		*error_bits = io.error_bits;
 
-	return io.error ? -EIO : 0;
+	return io.error_bits ? -EIO : 0;
 }
 
 static int async_io(struct dm_io_client *client, unsigned int num_regions,
-		    struct io_region *where, int rw, struct dpages *dp,
+		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
 {
	struct io *io;
 
-	if (num_regions > 1 && rw != WRITE) {
+	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}
 
	io = mempool_alloc(client->pool, GFP_NOIO);
-	io->error = 0;
+	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
@@ -435,10 +436,15 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
 }
 
 /*
- * New collapsed (a)synchronous interface
+ * New collapsed (a)synchronous interface.
+ *
+ * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
+ * the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in
+ * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
+ * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
  */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
-	  struct io_region *where, unsigned long *sync_error_bits)
+	  struct dm_io_region *where, unsigned long *sync_error_bits)
 {
	int r;
	struct dpages dp;
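
The rewritten dm_io() comment above is the whole contract: a single call covers the synchronous case (notify.fn is NULL) and the asynchronous case, and reports per-region failures through an error_bits bitset. A hedged caller sketch under those rules; the DM_IO_VMA memory type and all of the surrounding setup are assumptions, not part of this diff:

[[
/*
 * Hypothetical caller sketch (not from this commit): issue one synchronous
 * read over a single region with the collapsed dm_io() interface.
 */
static int example_read_region(struct dm_io_client *client,
			       struct block_device *bdev, void *vma)
{
	unsigned long error_bits = 0;
	struct dm_io_region where = {
		.bdev = bdev,
		.sector = 0,
		.count = 8,			/* eight 512-byte sectors */
	};
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_VMA,		/* assumed memory type */
		.mem.ptr.vma = vma,
		.notify.fn = NULL,		/* NULL notify.fn => synchronous */
		.client = client,
	};

	/* error_bits gets one bit per failed region; any failure => -EIO. */
	return dm_io(&io_req, 1, &where, &error_bits);
}
]]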
@@ -9,9 +9,8 @@
  * completion notification.
  */
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <asm/atomic.h>
-
 #include <linux/blkdev.h>
 #include <linux/fs.h>
 #include <linux/init.h>
@@ -23,24 +22,15 @@
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
+#include <linux/dm-kcopyd.h>
 
-#include "kcopyd.h"
-
-static struct workqueue_struct *_kcopyd_wq;
-static struct work_struct _kcopyd_work;
-
-static void wake(void)
-{
-	queue_work(_kcopyd_wq, &_kcopyd_work);
-}
+#include "dm.h"
 
 /*-----------------------------------------------------------------
  * Each kcopyd client has its own little pool of preallocated
  * pages for kcopyd io.
  *---------------------------------------------------------------*/
-struct kcopyd_client {
-	struct list_head list;
-
+struct dm_kcopyd_client {
	spinlock_t lock;
	struct page_list *pages;
	unsigned int nr_pages;
@@ -50,8 +40,32 @@ struct kcopyd_client {
 
	wait_queue_head_t destroyq;
	atomic_t nr_jobs;
+
+	mempool_t *job_pool;
+
+	struct workqueue_struct *kcopyd_wq;
+	struct work_struct kcopyd_work;
+
+/*
+ * We maintain three lists of jobs:
+ *
+ * i)   jobs waiting for pages
+ * ii)  jobs that have pages, and are waiting for the io to be issued.
+ * iii) jobs that have completed.
+ *
+ * All three of these are protected by job_lock.
+ */
+	spinlock_t job_lock;
+	struct list_head complete_jobs;
+	struct list_head io_jobs;
+	struct list_head pages_jobs;
 };
 
+static void wake(struct dm_kcopyd_client *kc)
+{
+	queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
+}
+
 static struct page_list *alloc_pl(void)
 {
	struct page_list *pl;
@@ -75,7 +89,7 @@ static void free_pl(struct page_list *pl)
	kfree(pl);
 }
 
-static int kcopyd_get_pages(struct kcopyd_client *kc,
+static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
			    unsigned int nr, struct page_list **pages)
 {
	struct page_list *pl;
@@ -98,7 +112,7 @@ static int kcopyd_get_pages(struct kcopyd_client *kc,
	return 0;
 }
 
-static void kcopyd_put_pages(struct kcopyd_client *kc, struct page_list *pl)
+static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
 {
	struct page_list *cursor;
 
@@ -126,7 +140,7 @@ static void drop_pages(struct page_list *pl)
	}
 }
 
-static int client_alloc_pages(struct kcopyd_client *kc, unsigned int nr)
+static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
 {
	unsigned int i;
	struct page_list *pl = NULL, *next;
@@ -147,7 +161,7 @@ static int client_alloc_pages(struct kcopyd_client *kc, unsigned int nr)
	return 0;
 }
 
-static void client_free_pages(struct kcopyd_client *kc)
+static void client_free_pages(struct dm_kcopyd_client *kc)
 {
	BUG_ON(kc->nr_free_pages != kc->nr_pages);
	drop_pages(kc->pages);
@@ -161,7 +175,7 @@ static void client_free_pages(struct kcopyd_client *kc)
  * ever having to do io (which could cause a deadlock).
  *---------------------------------------------------------------*/
 struct kcopyd_job {
-	struct kcopyd_client *kc;
+	struct dm_kcopyd_client *kc;
	struct list_head list;
	unsigned long flags;
 
@@ -175,13 +189,13 @@ struct kcopyd_job {
	 * Either READ or WRITE
	 */
	int rw;
-	struct io_region source;
+	struct dm_io_region source;
 
	/*
	 * The destinations for the transfer.
	 */
	unsigned int num_dests;
-	struct io_region dests[KCOPYD_MAX_REGIONS];
+	struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];
 
	sector_t offset;
	unsigned int nr_pages;
@@ -191,7 +205,7 @@ struct kcopyd_job {
	 * Set this to ensure you are notified when the job has
	 * completed.  'context' is for callback to use.
	 */
-	kcopyd_notify_fn fn;
+	dm_kcopyd_notify_fn fn;
	void *context;
 
	/*
@@ -207,47 +221,19 @@ struct kcopyd_job {
 #define MIN_JOBS 512
 
 static struct kmem_cache *_job_cache;
-static mempool_t *_job_pool;
-
-/*
- * We maintain three lists of jobs:
- *
- * i)   jobs waiting for pages
- * ii)  jobs that have pages, and are waiting for the io to be issued.
- * iii) jobs that have completed.
- *
- * All three of these are protected by job_lock.
- */
-static DEFINE_SPINLOCK(_job_lock);
-
-static LIST_HEAD(_complete_jobs);
-static LIST_HEAD(_io_jobs);
-static LIST_HEAD(_pages_jobs);
 
-static int jobs_init(void)
+int __init dm_kcopyd_init(void)
 {
	_job_cache = KMEM_CACHE(kcopyd_job, 0);
	if (!_job_cache)
		return -ENOMEM;
 
-	_job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
-	if (!_job_pool) {
-		kmem_cache_destroy(_job_cache);
-		return -ENOMEM;
-	}
-
	return 0;
 }
 
-static void jobs_exit(void)
+void dm_kcopyd_exit(void)
 {
-	BUG_ON(!list_empty(&_complete_jobs));
-	BUG_ON(!list_empty(&_io_jobs));
-	BUG_ON(!list_empty(&_pages_jobs));
-
-	mempool_destroy(_job_pool);
	kmem_cache_destroy(_job_cache);
-	_job_pool = NULL;
	_job_cache = NULL;
 }
 
@@ -255,18 +241,19 @@ static void jobs_exit(void)
  * Functions to push and pop a job onto the head of a given job
  * list.
  */
-static struct kcopyd_job *pop(struct list_head *jobs)
+static struct kcopyd_job *pop(struct list_head *jobs,
+			      struct dm_kcopyd_client *kc)
 {
	struct kcopyd_job *job = NULL;
	unsigned long flags;
 
-	spin_lock_irqsave(&_job_lock, flags);
+	spin_lock_irqsave(&kc->job_lock, flags);
 
	if (!list_empty(jobs)) {
		job = list_entry(jobs->next, struct kcopyd_job, list);
		list_del(&job->list);
	}
-	spin_unlock_irqrestore(&_job_lock, flags);
+	spin_unlock_irqrestore(&kc->job_lock, flags);
 
	return job;
 }
@@ -274,10 +261,11 @@ static struct kcopyd_job *pop(struct list_head *jobs)
 static void push(struct list_head *jobs, struct kcopyd_job *job)
 {
	unsigned long flags;
+	struct dm_kcopyd_client *kc = job->kc;
 
-	spin_lock_irqsave(&_job_lock, flags);
+	spin_lock_irqsave(&kc->job_lock, flags);
	list_add_tail(&job->list, jobs);
-	spin_unlock_irqrestore(&_job_lock, flags);
+	spin_unlock_irqrestore(&kc->job_lock, flags);
 }
 
 /*
@@ -294,11 +282,11 @@ static int run_complete_job(struct kcopyd_job *job)
	void *context = job->context;
	int read_err = job->read_err;
	unsigned long write_err = job->write_err;
-	kcopyd_notify_fn fn = job->fn;
-	struct kcopyd_client *kc = job->kc;
+	dm_kcopyd_notify_fn fn = job->fn;
+	struct dm_kcopyd_client *kc = job->kc;
 
	kcopyd_put_pages(kc, job->pages);
-	mempool_free(job, _job_pool);
+	mempool_free(job, kc->job_pool);
	fn(read_err, write_err, context);
 
	if (atomic_dec_and_test(&kc->nr_jobs))
@@ -310,6 +298,7 @@ static int run_complete_job(struct kcopyd_job *job)
 static void complete_io(unsigned long error, void *context)
 {
	struct kcopyd_job *job = (struct kcopyd_job *) context;
+	struct dm_kcopyd_client *kc = job->kc;
 
	if (error) {
		if (job->rw == WRITE)
@@ -317,22 +306,22 @@ static void complete_io(unsigned long error, void *context)
		else
			job->read_err = 1;
 
-		if (!test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
-			push(&_complete_jobs, job);
-			wake();
+		if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
+			push(&kc->complete_jobs, job);
+			wake(kc);
			return;
		}
	}
 
	if (job->rw == WRITE)
-		push(&_complete_jobs, job);
+		push(&kc->complete_jobs, job);
 
	else {
		job->rw = WRITE;
-		push(&_io_jobs, job);
+		push(&kc->io_jobs, job);
	}
 
-	wake();
+	wake(kc);
 }
 
 /*
@@ -343,7 +332,7 @@ static int run_io_job(struct kcopyd_job *job)
 {
	int r;
	struct dm_io_request io_req = {
-		.bi_rw = job->rw,
+		.bi_rw = job->rw | (1 << BIO_RW_SYNC),
		.mem.type = DM_IO_PAGE_LIST,
		.mem.ptr.pl = job->pages,
		.mem.offset = job->offset,
@@ -369,7 +358,7 @@ static int run_pages_job(struct kcopyd_job *job)
	r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
	if (!r) {
		/* this job is ready for io */
-		push(&_io_jobs, job);
+		push(&job->kc->io_jobs, job);
		return 0;
	}
 
@@ -384,12 +373,13 @@ static int run_pages_job(struct kcopyd_job *job)
  * Run through a list for as long as possible.  Returns the count
  * of successful jobs.
  */
-static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
+static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
+			int (*fn) (struct kcopyd_job *))
 {
	struct kcopyd_job *job;
	int r, count = 0;
 
-	while ((job = pop(jobs))) {
+	while ((job = pop(jobs, kc))) {
 
		r = fn(job);
 
@@ -399,7 +389,7 @@ static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
				job->write_err = (unsigned long) -1L;
			else
				job->read_err = 1;
-			push(&_complete_jobs, job);
+			push(&kc->complete_jobs, job);
			break;
		}
 
@@ -421,8 +411,11 @@ static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
 /*
  * kcopyd does this every time it's woken up.
  */
-static void do_work(struct work_struct *ignored)
+static void do_work(struct work_struct *work)
 {
+	struct dm_kcopyd_client *kc = container_of(work,
+					struct dm_kcopyd_client, kcopyd_work);
+
	/*
	 * The order that these are called is *very* important.
	 * complete jobs can free some pages for pages jobs.
@@ -430,9 +423,9 @@ static void do_work(struct work_struct *ignored)
	 * list.  io jobs call wake when they complete and it all
	 * starts again.
	 */
-	process_jobs(&_complete_jobs, run_complete_job);
-	process_jobs(&_pages_jobs, run_pages_job);
-	process_jobs(&_io_jobs, run_io_job);
+	process_jobs(&kc->complete_jobs, kc, run_complete_job);
+	process_jobs(&kc->pages_jobs, kc, run_pages_job);
+	process_jobs(&kc->io_jobs, kc, run_io_job);
 }
 
 /*
@@ -442,9 +435,10 @@ static void do_work(struct work_struct *ignored)
  */
 static void dispatch_job(struct kcopyd_job *job)
 {
-	atomic_inc(&job->kc->nr_jobs);
-	push(&_pages_jobs, job);
-	wake();
+	struct dm_kcopyd_client *kc = job->kc;
+	atomic_inc(&kc->nr_jobs);
+	push(&kc->pages_jobs, job);
+	wake(kc);
 }
 
 #define SUB_JOB_SIZE 128
@@ -469,7 +463,7 @@ static void segment_complete(int read_err, unsigned long write_err,
	 * Only dispatch more work if there hasn't been an error.
	 */
	if ((!job->read_err && !job->write_err) ||
-	    test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
+	    test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
		/* get the next chunk of work */
		progress = job->progress;
		count = job->source.count - progress;
@@ -484,7 +478,8 @@ static void segment_complete(int read_err, unsigned long write_err,
 
	if (count) {
		int i;
-		struct kcopyd_job *sub_job = mempool_alloc(_job_pool, GFP_NOIO);
+		struct kcopyd_job *sub_job = mempool_alloc(job->kc->job_pool,
+							   GFP_NOIO);
 
		*sub_job = *job;
		sub_job->source.sector += progress;
@@ -508,7 +503,7 @@ static void segment_complete(int read_err, unsigned long write_err,
		 * after we've completed.
		 */
		job->fn(read_err, write_err, job->context);
-		mempool_free(job, _job_pool);
+		mempool_free(job, job->kc->job_pool);
	}
 }
 
@@ -526,16 +521,16 @@ static void split_job(struct kcopyd_job *job)
	segment_complete(0, 0u, job);
 }
 
-int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
-		unsigned int num_dests, struct io_region *dests,
-		unsigned int flags, kcopyd_notify_fn fn, void *context)
+int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
+		   unsigned int num_dests, struct dm_io_region *dests,
+		   unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
 {
	struct kcopyd_job *job;
 
	/*
	 * Allocate a new job.
	 */
-	job = mempool_alloc(_job_pool, GFP_NOIO);
+	job = mempool_alloc(kc->job_pool, GFP_NOIO);
 
	/*
	 * set up for the read.
@@ -569,6 +564,7 @@ int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
 
	return 0;
 }
+EXPORT_SYMBOL(dm_kcopyd_copy);
 
 /*
  * Cancels a kcopyd job, eg. someone might be deactivating a
@@ -583,126 +579,76 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
 #endif  /*  0  */
 
 /*-----------------------------------------------------------------
- * Unit setup
+ * Client setup
  *---------------------------------------------------------------*/
-static DEFINE_MUTEX(_client_lock);
-static LIST_HEAD(_clients);
-
-static void client_add(struct kcopyd_client *kc)
+int dm_kcopyd_client_create(unsigned int nr_pages,
+			    struct dm_kcopyd_client **result)
 {
-	mutex_lock(&_client_lock);
-	list_add(&kc->list, &_clients);
-	mutex_unlock(&_client_lock);
-}
-
-static void client_del(struct kcopyd_client *kc)
-{
-	mutex_lock(&_client_lock);
-	list_del(&kc->list);
-	mutex_unlock(&_client_lock);
-}
-
-static DEFINE_MUTEX(kcopyd_init_lock);
-static int kcopyd_clients = 0;
-
-static int kcopyd_init(void)
-{
-	int r;
-
-	mutex_lock(&kcopyd_init_lock);
-
-	if (kcopyd_clients) {
-		/* Already initialized. */
-		kcopyd_clients++;
-		mutex_unlock(&kcopyd_init_lock);
-		return 0;
-	}
-
-	r = jobs_init();
-	if (r) {
-		mutex_unlock(&kcopyd_init_lock);
-		return r;
-	}
-
-	_kcopyd_wq = create_singlethread_workqueue("kcopyd");
-	if (!_kcopyd_wq) {
-		jobs_exit();
-		mutex_unlock(&kcopyd_init_lock);
-		return -ENOMEM;
-	}
-
-	kcopyd_clients++;
-	INIT_WORK(&_kcopyd_work, do_work);
-	mutex_unlock(&kcopyd_init_lock);
-	return 0;
-}
-
-static void kcopyd_exit(void)
-{
-	mutex_lock(&kcopyd_init_lock);
-	kcopyd_clients--;
-	if (!kcopyd_clients) {
-		jobs_exit();
-		destroy_workqueue(_kcopyd_wq);
-		_kcopyd_wq = NULL;
-	}
-	mutex_unlock(&kcopyd_init_lock);
-}
-
-int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
-{
-	int r = 0;
-	struct kcopyd_client *kc;
-
-	r = kcopyd_init();
-	if (r)
-		return r;
+	int r = -ENOMEM;
+	struct dm_kcopyd_client *kc;
 
	kc = kmalloc(sizeof(*kc), GFP_KERNEL);
-	if (!kc) {
-		kcopyd_exit();
+	if (!kc)
		return -ENOMEM;
-	}
 
	spin_lock_init(&kc->lock);
+	spin_lock_init(&kc->job_lock);
+	INIT_LIST_HEAD(&kc->complete_jobs);
+	INIT_LIST_HEAD(&kc->io_jobs);
+	INIT_LIST_HEAD(&kc->pages_jobs);
+
+	kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
+	if (!kc->job_pool)
+		goto bad_slab;
+
+	INIT_WORK(&kc->kcopyd_work, do_work);
+	kc->kcopyd_wq = create_singlethread_workqueue("kcopyd");
+	if (!kc->kcopyd_wq)
+		goto bad_workqueue;
+
	kc->pages = NULL;
	kc->nr_pages = kc->nr_free_pages = 0;
	r = client_alloc_pages(kc, nr_pages);
-	if (r) {
-		kfree(kc);
-		kcopyd_exit();
-		return r;
-	}
+	if (r)
+		goto bad_client_pages;
 
	kc->io_client = dm_io_client_create(nr_pages);
	if (IS_ERR(kc->io_client)) {
		r = PTR_ERR(kc->io_client);
-		client_free_pages(kc);
-		kfree(kc);
-		kcopyd_exit();
-		return r;
+		goto bad_io_client;
	}
 
	init_waitqueue_head(&kc->destroyq);
	atomic_set(&kc->nr_jobs, 0);
 
-	client_add(kc);
	*result = kc;
	return 0;
+
+bad_io_client:
+	client_free_pages(kc);
+bad_client_pages:
+	destroy_workqueue(kc->kcopyd_wq);
+bad_workqueue:
+	mempool_destroy(kc->job_pool);
+bad_slab:
+	kfree(kc);
+
+	return r;
 }
+EXPORT_SYMBOL(dm_kcopyd_client_create);
 
-void kcopyd_client_destroy(struct kcopyd_client *kc)
+void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
 {
	/* Wait for completion of all jobs submitted by this client. */
	wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));
 
+	BUG_ON(!list_empty(&kc->complete_jobs));
+	BUG_ON(!list_empty(&kc->io_jobs));
+	BUG_ON(!list_empty(&kc->pages_jobs));
+	destroy_workqueue(kc->kcopyd_wq);
	dm_io_client_destroy(kc->io_client);
	client_free_pages(kc);
-	client_del(kc);
+	mempool_destroy(kc->job_pool);
	kfree(kc);
-	kcopyd_exit();
 }
-
-EXPORT_SYMBOL(kcopyd_client_create);
-EXPORT_SYMBOL(kcopyd_client_destroy);
-EXPORT_SYMBOL(kcopyd_copy);
+EXPORT_SYMBOL(dm_kcopyd_client_destroy);
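
After this rename each client owns its own job mempool, job lists and workqueue, so a caller only ever touches dm_kcopyd_client_create(), dm_kcopyd_copy() and dm_kcopyd_client_destroy(). A sketch modelled on the dm-raid1 recover() path later in this diff; the callback body and the device plumbing are assumptions:

[[
/*
 * Hypothetical caller sketch: copy one region to a single destination
 * with the renamed per-client kcopyd API.
 */
static void copy_done(int read_err, unsigned long write_err, void *context)
{
	/* read_err is a boolean; write_err is a bitset of failed dests. */
}

static int example_copy(struct dm_kcopyd_client *kc,
			struct block_device *src, struct block_device *dst)
{
	unsigned long flags = 0;
	struct dm_io_region from = { .bdev = src, .sector = 0, .count = 128 };
	struct dm_io_region to   = { .bdev = dst, .sector = 0, .count = 128 };

	/* Keep going on write errors; copy_done() inspects write_err. */
	set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	return dm_kcopyd_copy(kc, &from, 1, &to, flags, copy_done, NULL);
}
]]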
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2003 Sistina Software
+ * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the LGPL.
  */
@@ -8,64 +9,58 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/vmalloc.h>
+#include <linux/dm-io.h>
+#include <linux/dm-dirty-log.h>
 
-#include "dm-log.h"
-#include "dm-io.h"
+#include "dm.h"
 
-#define DM_MSG_PREFIX "mirror log"
+#define DM_MSG_PREFIX "dirty region log"
+
+struct dm_dirty_log_internal {
+	struct dm_dirty_log_type *type;
+
+	struct list_head list;
+	long use;
+};
 
 static LIST_HEAD(_log_types);
 static DEFINE_SPINLOCK(_lock);
 
-int dm_register_dirty_log_type(struct dirty_log_type *type)
+static struct dm_dirty_log_internal *__find_dirty_log_type(const char *name)
 {
-	spin_lock(&_lock);
-	type->use_count = 0;
-	list_add(&type->list, &_log_types);
-	spin_unlock(&_lock);
+	struct dm_dirty_log_internal *log_type;
 
-	return 0;
+	list_for_each_entry(log_type, &_log_types, list)
+		if (!strcmp(name, log_type->type->name))
+			return log_type;
+
+	return NULL;
 }
 
-int dm_unregister_dirty_log_type(struct dirty_log_type *type)
+static struct dm_dirty_log_internal *_get_dirty_log_type(const char *name)
 {
+	struct dm_dirty_log_internal *log_type;
+
	spin_lock(&_lock);
 
-	if (type->use_count)
-		DMWARN("Attempt to unregister a log type that is still in use");
-	else
-		list_del(&type->list);
-
-	spin_unlock(&_lock);
-
-	return 0;
-}
-
-static struct dirty_log_type *_get_type(const char *type_name)
-{
-	struct dirty_log_type *type;
-
-	spin_lock(&_lock);
-	list_for_each_entry (type, &_log_types, list)
-		if (!strcmp(type_name, type->name)) {
-			if (!type->use_count && !try_module_get(type->module)){
-				spin_unlock(&_lock);
-				return NULL;
-			}
-			type->use_count++;
-			spin_unlock(&_lock);
-			return type;
-		}
+	log_type = __find_dirty_log_type(name);
+	if (log_type) {
+		if (!log_type->use && !try_module_get(log_type->type->module))
+			log_type = NULL;
+		else
+			log_type->use++;
+	}
 
	spin_unlock(&_lock);
-	return NULL;
+
+	return log_type;
 }
 
 /*
  * get_type
  * @type_name
  *
- * Attempt to retrieve the dirty_log_type by name.  If not already
+ * Attempt to retrieve the dm_dirty_log_type by name.  If not already
  * available, attempt to load the appropriate module.
  *
  * Log modules are named "dm-log-" followed by the 'type_name'.
@@ -78,14 +73,17 @@ static struct dirty_log_type *_get_type(const char *type_name)
  *
  * Returns: dirty_log_type* on success, NULL on failure
  */
-static struct dirty_log_type *get_type(const char *type_name)
+static struct dm_dirty_log_type *get_type(const char *type_name)
 {
	char *p, *type_name_dup;
-	struct dirty_log_type *type;
+	struct dm_dirty_log_internal *log_type;
 
-	type = _get_type(type_name);
-	if (type)
-		return type;
+	if (!type_name)
+		return NULL;
+
+	log_type = _get_dirty_log_type(type_name);
+	if (log_type)
+		return log_type->type;
 
	type_name_dup = kstrdup(type_name, GFP_KERNEL);
	if (!type_name_dup) {
@@ -95,34 +93,106 @@ static struct dirty_log_type *get_type(const char *type_name)
	}
 
	while (request_module("dm-log-%s", type_name_dup) ||
-	       !(type = _get_type(type_name))) {
+	       !(log_type = _get_dirty_log_type(type_name))) {
		p = strrchr(type_name_dup, '-');
		if (!p)
			break;
		p[0] = '\0';
	}
 
-	if (!type)
+	if (!log_type)
		DMWARN("Module for logging type \"%s\" not found.", type_name);
 
	kfree(type_name_dup);
 
-	return type;
+	return log_type ? log_type->type : NULL;
 }
 
-static void put_type(struct dirty_log_type *type)
+static void put_type(struct dm_dirty_log_type *type)
 {
+	struct dm_dirty_log_internal *log_type;
+
+	if (!type)
+		return;
+
	spin_lock(&_lock);
-	if (!--type->use_count)
+	log_type = __find_dirty_log_type(type->name);
+	if (!log_type)
+		goto out;
+
+	if (!--log_type->use)
		module_put(type->module);
+
+	BUG_ON(log_type->use < 0);
+
+out:
	spin_unlock(&_lock);
 }
 
-struct dirty_log *dm_create_dirty_log(const char *type_name, struct dm_target *ti,
+static struct dm_dirty_log_internal *_alloc_dirty_log_type(struct dm_dirty_log_type *type)
+{
+	struct dm_dirty_log_internal *log_type = kzalloc(sizeof(*log_type),
+							 GFP_KERNEL);
+
+	if (log_type)
+		log_type->type = type;
+
+	return log_type;
+}
+
+int dm_dirty_log_type_register(struct dm_dirty_log_type *type)
+{
+	struct dm_dirty_log_internal *log_type = _alloc_dirty_log_type(type);
+	int r = 0;
+
+	if (!log_type)
+		return -ENOMEM;
+
+	spin_lock(&_lock);
+	if (!__find_dirty_log_type(type->name))
+		list_add(&log_type->list, &_log_types);
+	else {
+		kfree(log_type);
+		r = -EEXIST;
+	}
+	spin_unlock(&_lock);
+
+	return r;
+}
+EXPORT_SYMBOL(dm_dirty_log_type_register);
+
+int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type)
+{
+	struct dm_dirty_log_internal *log_type;
+
+	spin_lock(&_lock);
+
+	log_type = __find_dirty_log_type(type->name);
+	if (!log_type) {
+		spin_unlock(&_lock);
+		return -EINVAL;
+	}
+
+	if (log_type->use) {
+		spin_unlock(&_lock);
+		return -ETXTBSY;
+	}
+
+	list_del(&log_type->list);
+
+	spin_unlock(&_lock);
+	kfree(log_type);
+
+	return 0;
+}
+EXPORT_SYMBOL(dm_dirty_log_type_unregister);
+
+struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
+					 struct dm_target *ti,
			unsigned int argc, char **argv)
 {
-	struct dirty_log_type *type;
-	struct dirty_log *log;
+	struct dm_dirty_log_type *type;
+	struct dm_dirty_log *log;
 
	log = kmalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
@@ -143,13 +213,15 @@ struct dirty_log *dm_create_dirty_log(const char *type_name, struct dm_target *t
 
	return log;
 }
+EXPORT_SYMBOL(dm_dirty_log_create);
 
-void dm_destroy_dirty_log(struct dirty_log *log)
+void dm_dirty_log_destroy(struct dm_dirty_log *log)
 {
	log->type->dtr(log);
	put_type(log->type);
	kfree(log);
 }
+EXPORT_SYMBOL(dm_dirty_log_destroy);
 
 /*-----------------------------------------------------------------
  * Persistent and core logs share a lot of their implementation.
@@ -207,7 +279,7 @@ struct log_c {
	struct dm_dev *log_dev;
	struct log_header header;
 
-	struct io_region header_location;
+	struct dm_io_region header_location;
	struct log_header *disk_header;
 };
 
@@ -302,7 +374,7 @@ static inline int write_header(struct log_c *log)
  * argv contains region_size followed optionally by [no]sync
  *--------------------------------------------------------------*/
 #define BYTE_SHIFT 3
-static int create_log_context(struct dirty_log *log, struct dm_target *ti,
+static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
			      unsigned int argc, char **argv,
			      struct dm_dev *dev)
 {
@@ -315,7 +387,7 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
	int r;
 
	if (argc < 1 || argc > 2) {
-		DMWARN("wrong number of arguments to mirror log");
+		DMWARN("wrong number of arguments to dirty region log");
		return -EINVAL;
	}
 
@@ -325,8 +397,8 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
		else if (!strcmp(argv[1], "nosync"))
			sync = NOSYNC;
		else {
-			DMWARN("unrecognised sync argument to mirror log: %s",
-			       argv[1]);
+			DMWARN("unrecognised sync argument to "
+			       "dirty region log: %s", argv[1]);
			return -EINVAL;
		}
	}
@@ -434,7 +506,7 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
	return 0;
 }
 
-static int core_ctr(struct dirty_log *log, struct dm_target *ti,
+static int core_ctr(struct dm_dirty_log *log, struct dm_target *ti,
		    unsigned int argc, char **argv)
 {
	return create_log_context(log, ti, argc, argv, NULL);
@@ -447,7 +519,7 @@ static void destroy_log_context(struct log_c *lc)
	kfree(lc);
 }
 
-static void core_dtr(struct dirty_log *log)
+static void core_dtr(struct dm_dirty_log *log)
 {
	struct log_c *lc = (struct log_c *) log->context;
 
@@ -460,14 +532,14 @@ static void core_dtr(struct dirty_log *log)
  *
  * argv contains log_device region_size followed optionally by [no]sync
  *--------------------------------------------------------------*/
-static int disk_ctr(struct dirty_log *log, struct dm_target *ti,
+static int disk_ctr(struct dm_dirty_log *log, struct dm_target *ti,
		    unsigned int argc, char **argv)
 {
	int r;
	struct dm_dev *dev;
 
	if (argc < 2 || argc > 3) {
-		DMWARN("wrong number of arguments to disk mirror log");
+		DMWARN("wrong number of arguments to disk dirty region log");
		return -EINVAL;
	}
 
@@ -485,7 +557,7 @@ static int disk_ctr(struct dirty_log *log, struct dm_target *ti,
	return 0;
 }
 
-static void disk_dtr(struct dirty_log *log)
+static void disk_dtr(struct dm_dirty_log *log)
 {
	struct log_c *lc = (struct log_c *) log->context;
 
@@ -514,7 +586,7 @@ static void fail_log_device(struct log_c *lc)
	dm_table_event(lc->ti->table);
 }
 
-static int disk_resume(struct dirty_log *log)
+static int disk_resume(struct dm_dirty_log *log)
 {
	int r;
	unsigned i;
@@ -524,7 +596,7 @@ static int disk_resume(struct dirty_log *log)
	/* read the disk header */
	r = read_header(lc);
	if (r) {
-		DMWARN("%s: Failed to read header on mirror log device",
+		DMWARN("%s: Failed to read header on dirty region log device",
		       lc->log_dev->name);
		fail_log_device(lc);
		/*
@@ -562,7 +634,7 @@ static int disk_resume(struct dirty_log *log)
	/* write the new header */
	r = write_header(lc);
	if (r) {
-		DMWARN("%s: Failed to write header on mirror log device",
+		DMWARN("%s: Failed to write header on dirty region log device",
		       lc->log_dev->name);
		fail_log_device(lc);
	}
@@ -570,38 +642,38 @@ static int disk_resume(struct dirty_log *log)
	return r;
 }
 
-static uint32_t core_get_region_size(struct dirty_log *log)
+static uint32_t core_get_region_size(struct dm_dirty_log *log)
 {
	struct log_c *lc = (struct log_c *) log->context;
	return lc->region_size;
 }
 
-static int core_resume(struct dirty_log *log)
+static int core_resume(struct dm_dirty_log *log)
 {
	struct log_c *lc = (struct log_c *) log->context;
	lc->sync_search = 0;
	return 0;
 }
 
-static int core_is_clean(struct dirty_log *log, region_t region)
+static int core_is_clean(struct dm_dirty_log *log, region_t region)
 {
	struct log_c *lc = (struct log_c *) log->context;
	return log_test_bit(lc->clean_bits, region);
 }
 
-static int core_in_sync(struct dirty_log *log, region_t region, int block)
+static int core_in_sync(struct dm_dirty_log *log, region_t region, int block)
 {
	struct log_c *lc = (struct log_c *) log->context;
	return log_test_bit(lc->sync_bits, region);
 }
 
-static int core_flush(struct dirty_log *log)
+static int core_flush(struct dm_dirty_log *log)
 {
	/* no op */
	return 0;
 }
 
-static int disk_flush(struct dirty_log *log)
+static int disk_flush(struct dm_dirty_log *log)
 {
	int r;
	struct log_c *lc = (struct log_c *) log->context;
@@ -619,19 +691,19 @@ static int disk_flush(struct dirty_log *log)
	return r;
 }
 
-static void core_mark_region(struct dirty_log *log, region_t region)
+static void core_mark_region(struct dm_dirty_log *log, region_t region)
 {
	struct log_c *lc = (struct log_c *) log->context;
	log_clear_bit(lc, lc->clean_bits, region);
 }
 
-static void core_clear_region(struct dirty_log *log, region_t region)
+static void core_clear_region(struct dm_dirty_log *log, region_t region)
 {
	struct log_c *lc = (struct log_c *) log->context;
	log_set_bit(lc, lc->clean_bits, region);
 }
 
-static int core_get_resync_work(struct dirty_log *log, region_t *region)
+static int core_get_resync_work(struct dm_dirty_log *log, region_t *region)
 {
	struct log_c *lc = (struct log_c *) log->context;
 
@@ -654,7 +726,7 @@ static int core_get_resync_work(struct dirty_log *log, region_t *region)
	return 1;
 }
 
-static void core_set_region_sync(struct dirty_log *log, region_t region,
+static void core_set_region_sync(struct dm_dirty_log *log, region_t region,
				 int in_sync)
 {
	struct log_c *lc = (struct log_c *) log->context;
@@ -669,7 +741,7 @@ static void core_set_region_sync(struct dirty_log *log, region_t region,
	}
 }
 
-static region_t core_get_sync_count(struct dirty_log *log)
+static region_t core_get_sync_count(struct dm_dirty_log *log)
 {
	struct log_c *lc = (struct log_c *) log->context;
 
@@ -680,7 +752,7 @@ static region_t core_get_sync_count(struct dirty_log *log)
	if (lc->sync != DEFAULTSYNC) \
		DMEMIT("%ssync ", lc->sync == NOSYNC ? "no" : "")
 
-static int core_status(struct dirty_log *log, status_type_t status,
+static int core_status(struct dm_dirty_log *log, status_type_t status,
		       char *result, unsigned int maxlen)
 {
	int sz = 0;
@@ -700,7 +772,7 @@ static int core_status(struct dirty_log *log, status_type_t status,
	return sz;
 }
 
-static int disk_status(struct dirty_log *log, status_type_t status,
+static int disk_status(struct dm_dirty_log *log, status_type_t status,
		       char *result, unsigned int maxlen)
 {
	int sz = 0;
@@ -722,7 +794,7 @@ static int disk_status(struct dirty_log *log, status_type_t status,
	return sz;
 }
 
-static struct dirty_log_type _core_type = {
+static struct dm_dirty_log_type _core_type = {
	.name = "core",
	.module = THIS_MODULE,
	.ctr = core_ctr,
@@ -740,7 +812,7 @@ static struct dirty_log_type _core_type = {
	.status = core_status,
 };
 
-static struct dirty_log_type _disk_type = {
+static struct dm_dirty_log_type _disk_type = {
	.name = "disk",
	.module = THIS_MODULE,
	.ctr = disk_ctr,
@@ -763,26 +835,28 @@ int __init dm_dirty_log_init(void)
 {
	int r;
 
-	r = dm_register_dirty_log_type(&_core_type);
+	r = dm_dirty_log_type_register(&_core_type);
	if (r)
		DMWARN("couldn't register core log");
 
-	r = dm_register_dirty_log_type(&_disk_type);
+	r = dm_dirty_log_type_register(&_disk_type);
	if (r) {
		DMWARN("couldn't register disk type");
-		dm_unregister_dirty_log_type(&_core_type);
+		dm_dirty_log_type_unregister(&_core_type);
	}
 
	return r;
 }
 
-void dm_dirty_log_exit(void)
+void __exit dm_dirty_log_exit(void)
 {
-	dm_unregister_dirty_log_type(&_disk_type);
-	dm_unregister_dirty_log_type(&_core_type);
+	dm_dirty_log_type_unregister(&_disk_type);
+	dm_dirty_log_type_unregister(&_core_type);
 }
 
-EXPORT_SYMBOL(dm_register_dirty_log_type);
-EXPORT_SYMBOL(dm_unregister_dirty_log_type);
-EXPORT_SYMBOL(dm_create_dirty_log);
-EXPORT_SYMBOL(dm_destroy_dirty_log);
+module_init(dm_dirty_log_init);
+module_exit(dm_dirty_log_exit);
+
+MODULE_DESCRIPTION(DM_NAME " dirty region log");
+MODULE_AUTHOR("Joe Thornber, Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
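
The register/unregister pair above, together with the "dm-log-" module-naming rule in get_type(), defines how a dirty-log type plugs in. A hedged sketch of a loadable log-type module (the "example" name is hypothetical and most dm_dirty_log_type hooks are stubbed out, so this shows the registration lifecycle only):

[[
/*
 * Hypothetical dirty-log module sketch, built as dm-log-example.ko so that
 * get_type()'s request_module("dm-log-example") can autoload it.
 */
#include <linux/module.h>
#include <linux/dm-dirty-log.h>

static int example_ctr(struct dm_dirty_log *log, struct dm_target *ti,
		       unsigned int argc, char **argv)
{
	return 0;			/* would allocate log->context */
}

static void example_dtr(struct dm_dirty_log *log)
{
}

static struct dm_dirty_log_type _example_type = {
	.name = "example",
	.module = THIS_MODULE,
	.ctr = example_ctr,
	.dtr = example_dtr,
	/* remaining hooks (mark_region, in_sync, flush, ...) omitted */
};

static int __init example_log_init(void)
{
	/* Returns -EEXIST if a type with this name is already registered. */
	return dm_dirty_log_type_register(&_example_type);
}

static void __exit example_log_exit(void)
{
	/* Returns -ETXTBSY while any instance still holds a reference. */
	dm_dirty_log_type_unregister(&_example_type);
}

module_init(example_log_init);
module_exit(example_log_exit);
MODULE_LICENSE("GPL");
]]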
@ -7,9 +7,6 @@
|
|||
#include "dm.h"
|
||||
#include "dm-bio-list.h"
|
||||
#include "dm-bio-record.h"
|
||||
#include "dm-io.h"
|
||||
#include "dm-log.h"
|
||||
#include "kcopyd.h"
|
||||
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/init.h>
|
||||
|
@ -22,6 +19,9 @@
|
|||
#include <linux/workqueue.h>
|
||||
#include <linux/log2.h>
|
||||
#include <linux/hardirq.h>
|
||||
#include <linux/dm-io.h>
|
||||
#include <linux/dm-dirty-log.h>
|
||||
#include <linux/dm-kcopyd.h>
|
||||
|
||||
#define DM_MSG_PREFIX "raid1"
|
||||
#define DM_IO_PAGES 64
|
||||
|
@ -74,7 +74,7 @@ struct region_hash {
|
|||
unsigned region_shift;
|
||||
|
||||
/* holds persistent region state */
|
||||
struct dirty_log *log;
|
||||
struct dm_dirty_log *log;
|
||||
|
||||
/* hash table */
|
||||
rwlock_t hash_lock;
|
||||
|
@ -133,7 +133,7 @@ struct mirror_set {
|
|||
struct dm_target *ti;
|
||||
struct list_head list;
|
||||
struct region_hash rh;
|
||||
struct kcopyd_client *kcopyd_client;
|
||||
struct dm_kcopyd_client *kcopyd_client;
|
||||
uint64_t features;
|
||||
|
||||
spinlock_t lock; /* protects the lists */
|
||||
|
@ -154,6 +154,9 @@ struct mirror_set {
|
|||
|
||||
struct workqueue_struct *kmirrord_wq;
|
||||
struct work_struct kmirrord_work;
|
||||
struct timer_list timer;
|
||||
unsigned long timer_pending;
|
||||
|
||||
struct work_struct trigger_event;
|
||||
|
||||
unsigned int nr_mirrors;
|
||||
|
@ -178,13 +181,32 @@ static void wake(struct mirror_set *ms)
|
|||
queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
|
||||
}
|
||||
|
||||
static void delayed_wake_fn(unsigned long data)
|
||||
{
|
||||
struct mirror_set *ms = (struct mirror_set *) data;
|
||||
|
||||
clear_bit(0, &ms->timer_pending);
|
||||
wake(ms);
|
||||
}
|
||||
|
||||
static void delayed_wake(struct mirror_set *ms)
|
||||
{
|
||||
if (test_and_set_bit(0, &ms->timer_pending))
|
||||
return;
|
||||
|
||||
ms->timer.expires = jiffies + HZ / 5;
|
||||
ms->timer.data = (unsigned long) ms;
|
||||
ms->timer.function = delayed_wake_fn;
|
||||
add_timer(&ms->timer);
|
||||
}
|
||||
|
||||
/* FIXME move this */
|
||||
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
|
||||
|
||||
#define MIN_REGIONS 64
|
||||
#define MAX_RECOVERY 1
|
||||
static int rh_init(struct region_hash *rh, struct mirror_set *ms,
|
||||
struct dirty_log *log, uint32_t region_size,
|
||||
struct dm_dirty_log *log, uint32_t region_size,
|
||||
region_t nr_regions)
|
||||
{
|
||||
unsigned int nr_buckets, max_buckets;
|
||||
|
@ -249,7 +271,7 @@ static void rh_exit(struct region_hash *rh)
|
|||
}
|
||||
|
||||
if (rh->log)
|
||||
dm_destroy_dirty_log(rh->log);
|
||||
dm_dirty_log_destroy(rh->log);
|
||||
if (rh->region_pool)
|
||||
mempool_destroy(rh->region_pool);
|
||||
vfree(rh->buckets);
|
||||
|
@ -405,24 +427,22 @@ static void rh_update_states(struct region_hash *rh)
|
|||
write_lock_irq(&rh->hash_lock);
|
||||
spin_lock(&rh->region_lock);
|
||||
if (!list_empty(&rh->clean_regions)) {
|
||||
list_splice(&rh->clean_regions, &clean);
|
||||
INIT_LIST_HEAD(&rh->clean_regions);
|
||||
list_splice_init(&rh->clean_regions, &clean);
|
||||
|
||||
list_for_each_entry(reg, &clean, list)
|
||||
list_del(®->hash_list);
|
||||
}
|
||||
|
||||
if (!list_empty(&rh->recovered_regions)) {
|
||||
list_splice(&rh->recovered_regions, &recovered);
|
||||
INIT_LIST_HEAD(&rh->recovered_regions);
|
||||
list_splice_init(&rh->recovered_regions, &recovered);
|
||||
|
||||
list_for_each_entry (reg, &recovered, list)
|
||||
list_del(®->hash_list);
|
||||
}
|
||||
|
||||
if (!list_empty(&rh->failed_recovered_regions)) {
|
||||
list_splice(&rh->failed_recovered_regions, &failed_recovered);
|
||||
INIT_LIST_HEAD(&rh->failed_recovered_regions);
|
||||
list_splice_init(&rh->failed_recovered_regions,
|
||||
&failed_recovered);
|
||||
|
||||
list_for_each_entry(reg, &failed_recovered, list)
|
||||
list_del(®->hash_list);
|
||||
|
@ -790,7 +810,7 @@ static int recover(struct mirror_set *ms, struct region *reg)
|
|||
{
|
||||
int r;
|
||||
unsigned int i;
|
||||
struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
|
||||
struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
|
||||
struct mirror *m;
|
||||
unsigned long flags = 0;
|
||||
|
||||
|
@ -822,9 +842,9 @@ static int recover(struct mirror_set *ms, struct region *reg)
|
|||
}
|
||||
|
||||
/* hand to kcopyd */
|
||||
set_bit(KCOPYD_IGNORE_ERROR, &flags);
|
||||
r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
|
||||
recovery_complete, reg);
|
||||
set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);
|
||||
r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
|
||||
flags, recovery_complete, reg);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
@ -833,7 +853,7 @@ static void do_recovery(struct mirror_set *ms)
|
|||
{
|
||||
int r;
|
||||
struct region *reg;
|
||||
struct dirty_log *log = ms->rh.log;
|
||||
struct dm_dirty_log *log = ms->rh.log;
|
||||
|
||||
/*
|
||||
* Start quiescing some regions.
|
||||
|
@ -909,7 +929,7 @@ static void map_bio(struct mirror *m, struct bio *bio)
|
|||
bio->bi_sector = map_sector(m, bio);
|
||||
}
|
||||
|
||||
static void map_region(struct io_region *io, struct mirror *m,
|
||||
static void map_region(struct dm_io_region *io, struct mirror *m,
|
||||
struct bio *bio)
|
||||
{
|
||||
io->bdev = m->dev->bdev;
|
||||
|
@ -951,7 +971,7 @@ static void read_callback(unsigned long error, void *context)
|
|||
/* Asynchronous read. */
|
||||
static void read_async_bio(struct mirror *m, struct bio *bio)
|
||||
{
|
||||
struct io_region io;
|
||||
struct dm_io_region io;
|
||||
struct dm_io_request io_req = {
|
||||
.bi_rw = READ,
|
||||
.mem.type = DM_IO_BVEC,
|
||||
|
@ -1019,7 +1039,7 @@ static void __bio_mark_nosync(struct mirror_set *ms,
|
|||
{
|
||||
unsigned long flags;
|
||||
struct region_hash *rh = &ms->rh;
|
||||
struct dirty_log *log = ms->rh.log;
|
||||
struct dm_dirty_log *log = ms->rh.log;
|
||||
struct region *reg;
|
||||
region_t region = bio_to_region(rh, bio);
|
||||
int recovering = 0;
|
||||
|
@ -1107,7 +1127,7 @@ out:
|
|||
static void do_write(struct mirror_set *ms, struct bio *bio)
|
||||
{
|
||||
unsigned int i;
|
||||
struct io_region io[ms->nr_mirrors], *dest = io;
|
||||
struct dm_io_region io[ms->nr_mirrors], *dest = io;
|
||||
struct mirror *m;
|
||||
struct dm_io_request io_req = {
|
||||
.bi_rw = WRITE,
|
||||
|
@ -1182,6 +1202,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
|
|||
spin_lock_irq(&ms->lock);
|
||||
bio_list_merge(&ms->failures, &sync);
|
||||
spin_unlock_irq(&ms->lock);
|
||||
wake(ms);
|
||||
} else
|
||||
while ((bio = bio_list_pop(&sync)))
|
||||
do_write(ms, bio);
|
||||
|
@ -1241,7 +1262,7 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures)
|
|||
bio_list_merge(&ms->failures, failures);
|
||||
spin_unlock_irq(&ms->lock);
|
||||
|
||||
wake(ms);
|
||||
delayed_wake(ms);
|
||||
}
|
||||
|
||||
static void trigger_event(struct work_struct *work)
|
||||
|
@ -1255,7 +1276,7 @@ static void trigger_event(struct work_struct *work)
|
|||
/*-----------------------------------------------------------------
|
||||
* kmirrord
|
||||
*---------------------------------------------------------------*/
|
||||
static int _do_mirror(struct work_struct *work)
|
||||
static void do_mirror(struct work_struct *work)
|
||||
{
|
||||
struct mirror_set *ms =container_of(work, struct mirror_set,
|
||||
kmirrord_work);
|
||||
|
@ -1277,23 +1298,7 @@ static int _do_mirror(struct work_struct *work)
|
|||
do_writes(ms, &writes);
|
||||
do_failures(ms, &failures);
|
||||
|
||||
return (ms->failures.head) ? 1 : 0;
|
||||
}
|
||||
|
||||
static void do_mirror(struct work_struct *work)
|
||||
{
|
||||
/*
|
||||
* If _do_mirror returns 1, we give it
|
||||
* another shot. This helps for cases like
|
||||
* 'suspend' where we call flush_workqueue
|
||||
* and expect all work to be finished. If
|
||||
* a failure happens during a suspend, we
|
||||
* couldn't issue a 'wake' because it would
|
||||
* not be honored. Therefore, we return '1'
|
||||
* from _do_mirror, and retry here.
|
||||
*/
|
||||
while (_do_mirror(work))
|
||||
schedule();
|
||||
 	dm_table_unplug_all(ms->ti->table);
 }
 
@@ -1303,7 +1308,7 @@ static void do_mirror(struct work_struct *work)
 static struct mirror_set *alloc_context(unsigned int nr_mirrors,
 					uint32_t region_size,
 					struct dm_target *ti,
-					struct dirty_log *dl)
+					struct dm_dirty_log *dl)
 {
 	size_t len;
 	struct mirror_set *ms = NULL;
@@ -1403,12 +1408,12 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
 /*
  * Create dirty log: log_type #log_params <log_params>
  */
-static struct dirty_log *create_dirty_log(struct dm_target *ti,
+static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
 					  unsigned int argc, char **argv,
 					  unsigned int *args_used)
 {
 	unsigned int param_count;
-	struct dirty_log *dl;
+	struct dm_dirty_log *dl;
 
 	if (argc < 2) {
 		ti->error = "Insufficient mirror log arguments";
@@ -1427,7 +1432,7 @@ static struct dirty_log *create_dirty_log(struct dm_target *ti,
 		return NULL;
 	}
 
-	dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
+	dl = dm_dirty_log_create(argv[0], ti, param_count, argv + 2);
 	if (!dl) {
 		ti->error = "Error creating mirror dirty log";
 		return NULL;
@@ -1435,7 +1440,7 @@ static struct dirty_log *create_dirty_log(struct dm_target *ti,
 
 	if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
 		ti->error = "Invalid region size";
-		dm_destroy_dirty_log(dl);
+		dm_dirty_log_destroy(dl);
 		return NULL;
 	}
 
@@ -1496,7 +1501,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	int r;
 	unsigned int nr_mirrors, m, args_used;
 	struct mirror_set *ms;
-	struct dirty_log *dl;
+	struct dm_dirty_log *dl;
 
 	dl = create_dirty_log(ti, argc, argv, &args_used);
 	if (!dl)
@@ -1506,9 +1511,9 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	argc -= args_used;
 
 	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
-	    nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
+	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
 		ti->error = "Invalid number of mirrors";
-		dm_destroy_dirty_log(dl);
+		dm_dirty_log_destroy(dl);
 		return -EINVAL;
 	}
 
@@ -1516,13 +1521,13 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	if (argc < nr_mirrors * 2) {
 		ti->error = "Too few mirror arguments";
-		dm_destroy_dirty_log(dl);
+		dm_dirty_log_destroy(dl);
 		return -EINVAL;
 	}
 
 	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
 	if (!ms) {
-		dm_destroy_dirty_log(dl);
+		dm_dirty_log_destroy(dl);
 		return -ENOMEM;
 	}
 
@@ -1547,6 +1552,8 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto err_free_context;
 	}
 	INIT_WORK(&ms->kmirrord_work, do_mirror);
+	init_timer(&ms->timer);
+	ms->timer_pending = 0;
 	INIT_WORK(&ms->trigger_event, trigger_event);
 
 	r = parse_features(ms, argc, argv, &args_used);
@@ -1571,7 +1578,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto err_destroy_wq;
 	}
 
-	r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
+	r = dm_kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
 	if (r)
 		goto err_destroy_wq;
 
@@ -1589,8 +1596,9 @@ static void mirror_dtr(struct dm_target *ti)
 {
 	struct mirror_set *ms = (struct mirror_set *) ti->private;
 
+	del_timer_sync(&ms->timer);
 	flush_workqueue(ms->kmirrord_wq);
-	kcopyd_client_destroy(ms->kcopyd_client);
+	dm_kcopyd_client_destroy(ms->kcopyd_client);
 	destroy_workqueue(ms->kmirrord_wq);
 	free_context(ms, ti, ms->nr_mirrors);
 }
@@ -1734,7 +1742,7 @@ out:
 static void mirror_presuspend(struct dm_target *ti)
 {
 	struct mirror_set *ms = (struct mirror_set *) ti->private;
-	struct dirty_log *log = ms->rh.log;
+	struct dm_dirty_log *log = ms->rh.log;
 
 	atomic_set(&ms->suspend, 1);
 
@@ -1763,7 +1771,7 @@ static void mirror_presuspend(struct dm_target *ti)
 static void mirror_postsuspend(struct dm_target *ti)
 {
 	struct mirror_set *ms = ti->private;
-	struct dirty_log *log = ms->rh.log;
+	struct dm_dirty_log *log = ms->rh.log;
 
 	if (log->type->postsuspend && log->type->postsuspend(log))
 		/* FIXME: need better error handling */
@@ -1773,7 +1781,7 @@ static void mirror_postsuspend(struct dm_target *ti)
 static void mirror_resume(struct dm_target *ti)
 {
 	struct mirror_set *ms = ti->private;
-	struct dirty_log *log = ms->rh.log;
+	struct dm_dirty_log *log = ms->rh.log;
 
 	atomic_set(&ms->suspend, 0);
 	if (log->type->resume && log->type->resume(log))
@@ -1811,7 +1819,7 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
 {
 	unsigned int m, sz = 0;
 	struct mirror_set *ms = (struct mirror_set *) ti->private;
-	struct dirty_log *log = ms->rh.log;
+	struct dm_dirty_log *log = ms->rh.log;
 	char buffer[ms->nr_mirrors + 1];
 
 	switch (type) {
@@ -1864,15 +1872,9 @@ static int __init dm_mirror_init(void)
 {
 	int r;
 
-	r = dm_dirty_log_init();
-	if (r)
-		return r;
-
 	r = dm_register_target(&mirror_target);
-	if (r < 0) {
+	if (r < 0)
 		DMERR("Failed to register mirror target");
-		dm_dirty_log_exit();
-	}
 
 	return r;
 }
@@ -1884,8 +1886,6 @@ static void __exit dm_mirror_exit(void)
 	r = dm_unregister_target(&mirror_target);
 	if (r < 0)
 		DMERR("unregister failed %d", r);
-
-	dm_dirty_log_exit();
 }
 
 /* Module hooks */

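The init_timer()/timer_pending additions in mirror_ctr() above (and the matching del_timer_sync() in mirror_dtr()) carry the "dm raid1: use timer" change. A minimal sketch of how such a timer is typically armed to coalesce wakeups of the kmirrord workqueue; the handler name, arming helper, and HZ/5 delay are illustrative assumptions, not taken from this diff (only ms->timer, ms->timer_pending, ms->kmirrord_wq and ms->kmirrord_work appear in the hunks above):

/* Illustrative sketch only; uses the pre-4.15 timer API current
 * for this kernel (expires/data/function filled in by hand). */
static void delayed_wake_fn(unsigned long data)
{
	struct mirror_set *ms = (struct mirror_set *) data;

	ms->timer_pending = 0;
	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (ms->timer_pending)
		return;		/* a wakeup is already scheduled */

	ms->timer_pending = 1;
	ms->timer.expires = jiffies + HZ / 5;	/* assumed delay */
	ms->timer.data = (unsigned long) ms;
	ms->timer.function = delayed_wake_fn;
	add_timer(&ms->timer);
}
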
@@ -18,10 +18,10 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/log2.h>
+#include <linux/dm-kcopyd.h>
 
 #include "dm-snap.h"
 #include "dm-bio-list.h"
-#include "kcopyd.h"
 
 #define DM_MSG_PREFIX "snapshots"
 
@@ -36,9 +36,9 @@
 #define SNAPSHOT_COPY_PRIORITY 2
 
 /*
- * Each snapshot reserves this many pages for io
+ * Reserve 1MB for each snapshot initially (with minimum of 1 page).
  */
-#define SNAPSHOT_PAGES 256
+#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
 
 static struct workqueue_struct *ksnapd;
 static void flush_queued_bios(struct work_struct *work);
@@ -536,7 +536,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	s->last_percent = 0;
 	init_rwsem(&s->lock);
 	spin_lock_init(&s->pe_lock);
-	s->table = ti->table;
+	s->ti = ti;
 
 	/* Allocate hash table for COW data */
 	if (init_hash_tables(s)) {
@@ -558,7 +558,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad4;
 	}
 
-	r = kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
+	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
 	if (r) {
 		ti->error = "Could not create kcopyd client";
 		goto bad5;
@@ -591,7 +591,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	return 0;
 
 bad6:
-	kcopyd_client_destroy(s->kcopyd_client);
+	dm_kcopyd_client_destroy(s->kcopyd_client);
 
 bad5:
 	s->store.destroy(&s->store);
@@ -613,7 +613,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 static void __free_exceptions(struct dm_snapshot *s)
 {
-	kcopyd_client_destroy(s->kcopyd_client);
+	dm_kcopyd_client_destroy(s->kcopyd_client);
 	s->kcopyd_client = NULL;
 
 	exit_exception_table(&s->pending, pending_cache);
@@ -699,7 +699,7 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
 
 	s->valid = 0;
 
-	dm_table_event(s->table);
+	dm_table_event(s->ti->table);
 }
 
 static void get_pending_exception(struct dm_snap_pending_exception *pe)
@@ -824,7 +824,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
 static void start_copy(struct dm_snap_pending_exception *pe)
 {
 	struct dm_snapshot *s = pe->snap;
-	struct io_region src, dest;
+	struct dm_io_region src, dest;
 	struct block_device *bdev = s->origin->bdev;
 	sector_t dev_size;
 
@@ -839,7 +839,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
 	dest.count = src.count;
 
 	/* Hand over to kcopyd */
-	kcopyd_copy(s->kcopyd_client,
+	dm_kcopyd_copy(s->kcopyd_client,
 		    &src, 1, &dest, 0, copy_callback, pe);
 }
 
@@ -1060,7 +1060,7 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
 			goto next_snapshot;
 
 		/* Nothing to do if writing beyond end of snapshot */
-		if (bio->bi_sector >= dm_table_get_size(snap->table))
+		if (bio->bi_sector >= dm_table_get_size(snap->ti->table))
 			goto next_snapshot;
 
 		/*

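The new SNAPSHOT_PAGES definition above sizes the kcopyd reservation from the page size instead of hard-coding 256 pages, with the GNU "?:" operator supplying the one-page minimum. A quick userspace check of the arithmetic (PAGE_SHIFT 12, i.e. 4KB pages, is an assumption here; builds with gcc, which provides the "?:" extension):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed: 4KB pages, as on most configs */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)

int main(void)
{
	/* 1MB / 4KB = 256 pages, matching the old hard-coded value */
	printf("SNAPSHOT_PAGES = %lu\n", SNAPSHOT_PAGES);
	return 0;
}

On a configuration whose pages were 1MB or larger the shift would yield 0, and the "?:" fallback would still reserve a single page.
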
@@ -132,7 +132,7 @@ struct exception_store {
 
 struct dm_snapshot {
 	struct rw_semaphore lock;
-	struct dm_table *table;
+	struct dm_target *ti;
 
 	struct dm_dev *origin;
 	struct dm_dev *cow;
@@ -169,7 +169,7 @@ struct dm_snapshot {
 	/* The on disk metadata handler */
 	struct exception_store store;
 
-	struct kcopyd_client *kcopyd_client;
+	struct dm_kcopyd_client *kcopyd_client;
 
 	/* Queue of snapshot writes for ksnapd to flush */
 	struct bio_list queued_bios;

@@ -245,44 +245,6 @@ int dm_table_create(struct dm_table **result, int mode,
 	return 0;
 }
 
-int dm_create_error_table(struct dm_table **result, struct mapped_device *md)
-{
-	struct dm_table *t;
-	sector_t dev_size = 1;
-	int r;
-
-	/*
-	 * Find current size of device.
-	 * Default to 1 sector if inactive.
-	 */
-	t = dm_get_table(md);
-	if (t) {
-		dev_size = dm_table_get_size(t);
-		dm_table_put(t);
-	}
-
-	r = dm_table_create(&t, FMODE_READ, 1, md);
-	if (r)
-		return r;
-
-	r = dm_table_add_target(t, "error", 0, dev_size, NULL);
-	if (r)
-		goto out;
-
-	r = dm_table_complete(t);
-	if (r)
-		goto out;
-
-	*result = t;
-
-out:
-	if (r)
-		dm_table_put(t);
-
-	return r;
-}
-EXPORT_SYMBOL_GPL(dm_create_error_table);
-
 static void free_devices(struct list_head *devices)
 {
 	struct list_head *tmp, *next;
@@ -954,7 +916,7 @@ void dm_table_presuspend_targets(struct dm_table *t)
 	if (!t)
 		return;
 
-	return suspend_targets(t, 0);
+	suspend_targets(t, 0);
 }
 
 void dm_table_postsuspend_targets(struct dm_table *t)
@@ -962,7 +924,7 @@ void dm_table_postsuspend_targets(struct dm_table *t)
 	if (!t)
 		return;
 
-	return suspend_targets(t, 1);
+	suspend_targets(t, 1);
 }
 
 int dm_table_resume_targets(struct dm_table *t)

@@ -204,6 +204,7 @@ static int (*_inits[])(void) __initdata = {
 	dm_target_init,
 	dm_linear_init,
 	dm_stripe_init,
+	dm_kcopyd_init,
 	dm_interface_init,
 };
 
@@ -212,6 +213,7 @@ static void (*_exits[])(void) = {
 	dm_target_exit,
 	dm_linear_exit,
 	dm_stripe_exit,
+	dm_kcopyd_exit,
 	dm_interface_exit,
 };
 
@@ -922,7 +924,7 @@ static void free_minor(int minor)
 /*
  * See if the device with a specific minor # is free.
  */
-static int specific_minor(struct mapped_device *md, int minor)
+static int specific_minor(int minor)
 {
 	int r, m;
 
@@ -955,7 +957,7 @@ out:
 	return r;
 }
 
-static int next_free_minor(struct mapped_device *md, int *minor)
+static int next_free_minor(int *minor)
 {
 	int r, m;
 
@@ -966,9 +968,8 @@ static int next_free_minor(struct mapped_device *md, int *minor)
 	spin_lock(&_minor_lock);
 
 	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
-	if (r) {
+	if (r)
 		goto out;
-	}
 
 	if (m >= (1 << MINORBITS)) {
 		idr_remove(&_minor_idr, m);
@@ -991,7 +992,7 @@ static struct block_device_operations dm_blk_dops;
 static struct mapped_device *alloc_dev(int minor)
 {
 	int r;
-	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
+	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
 	void *old_md;
 
 	if (!md) {
@@ -1004,13 +1005,12 @@ static struct mapped_device *alloc_dev(int minor)
 
 	/* get a minor number for the dev */
 	if (minor == DM_ANY_MINOR)
-		r = next_free_minor(md, &minor);
+		r = next_free_minor(&minor);
 	else
-		r = specific_minor(md, minor);
+		r = specific_minor(minor);
 	if (r < 0)
 		goto bad_minor;
 
-	memset(md, 0, sizeof(*md));
 	init_rwsem(&md->io_lock);
 	mutex_init(&md->suspend_lock);
 	spin_lock_init(&md->pushback_lock);

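The first two hunks above register kcopyd's new module-wide hooks in dm.c's _inits[]/_exits[] tables; the loop that consumes them (in dm's module init/exit code, outside this diff) brings subsystems up in array order and tears down already-initialised ones in reverse on failure. A minimal userspace analogue of that table-driven pattern (a/b are placeholder subsystems):

#include <stdio.h>

static int a_init(void)  { puts("a up");   return 0; }
static void a_exit(void) { puts("a down"); }
static int b_init(void)  { puts("b up");   return 0; }
static void b_exit(void) { puts("b down"); }

static int (*_inits[])(void)  = { a_init, b_init };
static void (*_exits[])(void) = { a_exit, b_exit };

int main(void)
{
	const int count = sizeof(_inits) / sizeof(*_inits);
	int i, r;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}
	return 0;
bad:
	while (i--)		/* unwind in reverse order */
		_exits[i]();
	return r;
}

Adding an entry to each array, as the hunks do for dm_kcopyd_init/dm_kcopyd_exit, is all that is needed to hook a new subsystem into this lifecycle.
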
@@ -16,67 +16,6 @@
 #include <linux/blkdev.h>
 #include <linux/hdreg.h>
 
-#define DM_NAME "device-mapper"
-
-#define DMERR(f, arg...) \
-	printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
-#define DMERR_LIMIT(f, arg...) \
-	do { \
-		if (printk_ratelimit()) \
-			printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
-			       f "\n", ## arg); \
-	} while (0)
-
-#define DMWARN(f, arg...) \
-	printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
-#define DMWARN_LIMIT(f, arg...) \
-	do { \
-		if (printk_ratelimit()) \
-			printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
-			       f "\n", ## arg); \
-	} while (0)
-
-#define DMINFO(f, arg...) \
-	printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
-#define DMINFO_LIMIT(f, arg...) \
-	do { \
-		if (printk_ratelimit()) \
-			printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
-			       "\n", ## arg); \
-	} while (0)
-
-#ifdef CONFIG_DM_DEBUG
-# define DMDEBUG(f, arg...) \
-	printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
-# define DMDEBUG_LIMIT(f, arg...) \
-	do { \
-		if (printk_ratelimit()) \
-			printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
-			       "\n", ## arg); \
-	} while (0)
-#else
-# define DMDEBUG(f, arg...) do {} while (0)
-# define DMDEBUG_LIMIT(f, arg...) do {} while (0)
-#endif
-
-#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
-			  0 : scnprintf(result + sz, maxlen - sz, x))
-
-#define SECTOR_SHIFT 9
-
-/*
- * Definitions of return values from target end_io function.
- */
-#define DM_ENDIO_INCOMPLETE	1
-#define DM_ENDIO_REQUEUE	2
-
-/*
- * Definitions of return values from target map function.
- */
-#define DM_MAPIO_SUBMITTED	0
-#define DM_MAPIO_REMAPPED	1
-#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
-
 /*
  * Suspend feature flags
  */
@@ -136,34 +75,6 @@ static inline int array_too_big(unsigned long fixed, unsigned long obj,
 	return (num > (ULONG_MAX - fixed) / obj);
 }
 
-/*
- * Ceiling(n / sz)
- */
-#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))
-
-#define dm_sector_div_up(n, sz) ( \
-{ \
-	sector_t _r = ((n) + (sz) - 1); \
-	sector_div(_r, (sz)); \
-	_r; \
-} \
-)
-
-/*
- * ceiling(n / size) * size
- */
-#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
-
-static inline sector_t to_sector(unsigned long n)
-{
-	return (n >> 9);
-}
-
-static inline unsigned long to_bytes(sector_t n)
-{
-	return (n << 9);
-}
-
 int dm_split_args(int *argc, char ***argvp, char *input);
 
 /*
@@ -189,4 +100,13 @@ int dm_lock_for_deletion(struct mapped_device *md);
 
 void dm_kobject_uevent(struct mapped_device *md);
 
+/*
+ * Dirty log
+ */
+int dm_dirty_log_init(void);
+void dm_dirty_log_exit(void);
+
+int dm_kcopyd_init(void);
+void dm_kcopyd_exit(void);
+
 #endif

@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2001 Sistina Software
- *
- * This file is released under the GPL.
- *
- * Kcopyd provides a simple interface for copying an area of one
- * block-device to one or more other block-devices, with an asynchronous
- * completion notification.
- */
-
-#ifndef DM_KCOPYD_H
-#define DM_KCOPYD_H
-
-#include "dm-io.h"
-
-/* FIXME: make this configurable */
-#define KCOPYD_MAX_REGIONS 8
-
-#define KCOPYD_IGNORE_ERROR 1
-
-/*
- * To use kcopyd you must first create a kcopyd client object.
- */
-struct kcopyd_client;
-int kcopyd_client_create(unsigned int num_pages, struct kcopyd_client **result);
-void kcopyd_client_destroy(struct kcopyd_client *kc);
-
-/*
- * Submit a copy job to kcopyd.  This is built on top of the
- * previous three fns.
- *
- * read_err is a boolean,
- * write_err is a bitset, with 1 bit for each destination region
- */
-typedef void (*kcopyd_notify_fn)(int read_err, unsigned long write_err,
-				 void *context);
-
-int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
-		unsigned int num_dests, struct io_region *dests,
-		unsigned int flags, kcopyd_notify_fn fn, void *context);
-
-#endif

@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2001 Sistina Software (UK) Limited.
- * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the LGPL.
  */
@@ -10,6 +10,8 @@
 
 #ifdef __KERNEL__
 
+#include <linux/bio.h>
+
 struct dm_target;
 struct dm_table;
 struct dm_dev;
@@ -250,11 +252,97 @@ void dm_table_event(struct dm_table *t);
  */
 int dm_swap_table(struct mapped_device *md, struct dm_table *t);
 
+/*-----------------------------------------------------------------
+ * Macros.
+ *---------------------------------------------------------------*/
+#define DM_NAME "device-mapper"
+
+#define DMERR(f, arg...) \
+	printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
+#define DMERR_LIMIT(f, arg...) \
+	do { \
+		if (printk_ratelimit()) \
+			printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
+			       f "\n", ## arg); \
+	} while (0)
+
+#define DMWARN(f, arg...) \
+	printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
+#define DMWARN_LIMIT(f, arg...) \
+	do { \
+		if (printk_ratelimit()) \
+			printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
+			       f "\n", ## arg); \
+	} while (0)
+
+#define DMINFO(f, arg...) \
+	printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
+#define DMINFO_LIMIT(f, arg...) \
+	do { \
+		if (printk_ratelimit()) \
+			printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
+			       "\n", ## arg); \
+	} while (0)
+
+#ifdef CONFIG_DM_DEBUG
+# define DMDEBUG(f, arg...) \
+	printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
+# define DMDEBUG_LIMIT(f, arg...) \
+	do { \
+		if (printk_ratelimit()) \
+			printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
+			       "\n", ## arg); \
+	} while (0)
+#else
+# define DMDEBUG(f, arg...) do {} while (0)
+# define DMDEBUG_LIMIT(f, arg...) do {} while (0)
+#endif
+
+#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
+			  0 : scnprintf(result + sz, maxlen - sz, x))
+
+#define SECTOR_SHIFT 9
+
 /*
- * Prepare a table for a device that will error all I/O.
- * To make it active, call dm_suspend(), dm_swap_table() then dm_resume().
+ * Definitions of return values from target end_io function.
  */
-int dm_create_error_table(struct dm_table **result, struct mapped_device *md);
+#define DM_ENDIO_INCOMPLETE	1
+#define DM_ENDIO_REQUEUE	2
+
+/*
+ * Definitions of return values from target map function.
+ */
+#define DM_MAPIO_SUBMITTED	0
+#define DM_MAPIO_REMAPPED	1
+#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
+
+/*
+ * Ceiling(n / sz)
+ */
+#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))
+
+#define dm_sector_div_up(n, sz) ( \
+{ \
+	sector_t _r = ((n) + (sz) - 1); \
+	sector_div(_r, (sz)); \
+	_r; \
+} \
+)
+
+/*
+ * ceiling(n / size) * size
+ */
+#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
+
+static inline sector_t to_sector(unsigned long n)
+{
+	return (n >> SECTOR_SHIFT);
+}
+
+static inline unsigned long to_bytes(sector_t n)
+{
+	return (n << SECTOR_SHIFT);
+}
+
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_DEVICE_MAPPER_H */

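With DMEMIT and the ceiling helpers now exposed in the public device-mapper header ("dm: expose macros"), targets outside drivers/md can use them for status formatting and sector arithmetic. A small userspace check of how these macros behave; substituting snprintf for the kernel's scnprintf is my approximation, adequate here because nothing overflows the buffer:

#include <stdio.h>

#define scnprintf snprintf	/* assumption: close enough for this demo */
#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))

#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
#define SECTOR_SHIFT 9

int main(void)
{
	char result[64];
	unsigned sz = 0, maxlen = sizeof(result);

	/* ceil(1000/8) = 125; round 1001 up to a multiple of 8 = 1008 */
	DMEMIT("%lu %lu ", dm_div_up(1000UL, 8UL), dm_round_up(1001UL, 8UL));
	/* a 4KB buffer spans 8 512-byte sectors */
	DMEMIT("%lu", 4096UL >> SECTOR_SHIFT);

	printf("%s\n", result);	/* prints: 125 1008 8 */
	return 0;
}

Note how DMEMIT assumes local sz, result and maxlen variables, which is exactly the environment of a target's status method.
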
@@ -1,52 +1,56 @@
 /*
  * Copyright (C) 2003 Sistina Software
+ * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
+ *
+ * Device-Mapper dirty region log.
  *
  * This file is released under the LGPL.
  */
 
-#ifndef DM_DIRTY_LOG
-#define DM_DIRTY_LOG
+#ifndef _LINUX_DM_DIRTY_LOG
+#define _LINUX_DM_DIRTY_LOG
 
-#include "dm.h"
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/device-mapper.h>
 
 typedef sector_t region_t;
 
-struct dirty_log_type;
+struct dm_dirty_log_type;
 
-struct dirty_log {
-	struct dirty_log_type *type;
+struct dm_dirty_log {
+	struct dm_dirty_log_type *type;
 	void *context;
 };
 
-struct dirty_log_type {
-	struct list_head list;
+struct dm_dirty_log_type {
 	const char *name;
 	struct module *module;
 	unsigned int use_count;
 
-	int (*ctr)(struct dirty_log *log, struct dm_target *ti,
-		   unsigned int argc, char **argv);
-	void (*dtr)(struct dirty_log *log);
+	int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti,
+		   unsigned argc, char **argv);
+	void (*dtr)(struct dm_dirty_log *log);
 
 	/*
 	 * There are times when we don't want the log to touch
 	 * the disk.
 	 */
-	int (*presuspend)(struct dirty_log *log);
-	int (*postsuspend)(struct dirty_log *log);
-	int (*resume)(struct dirty_log *log);
+	int (*presuspend)(struct dm_dirty_log *log);
+	int (*postsuspend)(struct dm_dirty_log *log);
+	int (*resume)(struct dm_dirty_log *log);
 
 	/*
 	 * Retrieves the smallest size of region that the log can
 	 * deal with.
 	 */
-	uint32_t (*get_region_size)(struct dirty_log *log);
+	uint32_t (*get_region_size)(struct dm_dirty_log *log);
 
 	/*
 	 * A predicate to say whether a region is clean or not.
 	 * May block.
 	 */
-	int (*is_clean)(struct dirty_log *log, region_t region);
+	int (*is_clean)(struct dm_dirty_log *log, region_t region);
 
 	/*
 	 * Returns: 0, 1, -EWOULDBLOCK, < 0
@@ -59,13 +63,14 @@ struct dirty_log_type {
 	 * passed to a daemon to deal with, since a daemon is
 	 * allowed to block.
 	 */
-	int (*in_sync)(struct dirty_log *log, region_t region, int can_block);
+	int (*in_sync)(struct dm_dirty_log *log, region_t region,
+		       int can_block);
 
 	/*
 	 * Flush the current log state (eg, to disk).  This
 	 * function may block.
 	 */
-	int (*flush)(struct dirty_log *log);
+	int (*flush)(struct dm_dirty_log *log);
 
 	/*
 	 * Mark an area as clean or dirty.  These functions may
@@ -73,8 +78,8 @@ struct dirty_log_type {
 	 * be extremely rare (eg, allocating another chunk of
 	 * memory for some reason).
 	 */
-	void (*mark_region)(struct dirty_log *log, region_t region);
-	void (*clear_region)(struct dirty_log *log, region_t region);
+	void (*mark_region)(struct dm_dirty_log *log, region_t region);
+	void (*clear_region)(struct dm_dirty_log *log, region_t region);
 
 	/*
 	 * Returns: <0 (error), 0 (no region), 1 (region)
@@ -88,44 +93,39 @@ struct dirty_log_type {
 	 * tells you if an area is synchronised, the other
 	 * assigns recovery work.
 	 */
-	int (*get_resync_work)(struct dirty_log *log, region_t *region);
+	int (*get_resync_work)(struct dm_dirty_log *log, region_t *region);
 
 	/*
 	 * This notifies the log that the resync status of a region
 	 * has changed.  It also clears the region from the recovering
 	 * list (if present).
 	 */
-	void (*set_region_sync)(struct dirty_log *log,
+	void (*set_region_sync)(struct dm_dirty_log *log,
 				region_t region, int in_sync);
 
 	/*
 	 * Returns the number of regions that are in sync.
 	 */
-	region_t (*get_sync_count)(struct dirty_log *log);
+	region_t (*get_sync_count)(struct dm_dirty_log *log);
 
 	/*
 	 * Support function for mirror status requests.
 	 */
-	int (*status)(struct dirty_log *log, status_type_t status_type,
-		      char *result, unsigned int maxlen);
+	int (*status)(struct dm_dirty_log *log, status_type_t status_type,
+		      char *result, unsigned maxlen);
 };
 
-int dm_register_dirty_log_type(struct dirty_log_type *type);
-int dm_unregister_dirty_log_type(struct dirty_log_type *type);
-
+int dm_dirty_log_type_register(struct dm_dirty_log_type *type);
+int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type);
 
 /*
  * Make sure you use these two functions, rather than calling
  * type->constructor/destructor() directly.
  */
-struct dirty_log *dm_create_dirty_log(const char *type_name, struct dm_target *ti,
-				      unsigned int argc, char **argv);
-void dm_destroy_dirty_log(struct dirty_log *log);
+struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
					 struct dm_target *ti,
+					 unsigned argc, char **argv);
+void dm_dirty_log_destroy(struct dm_dirty_log *log);
 
-/*
- * init/exit functions.
- */
-int dm_dirty_log_init(void);
-void dm_dirty_log_exit(void);
-
-#endif
+#endif	/* __KERNEL__ */
+#endif	/* _LINUX_DM_DIRTY_LOG_H */

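For a log implementation, the rename means registering through dm_dirty_log_type_register() and dropping the old list field, since module use tracking is now internal to the dirty log core. A skeletal out-of-tree log type against the new interface; the "example" name and no-op hooks are placeholders of mine, and a real type must implement every method the mirror code calls:

#include <linux/module.h>
#include <linux/dm-dirty-log.h>

/* Placeholder constructor/destructor: illustrative only */
static int example_ctr(struct dm_dirty_log *log, struct dm_target *ti,
		       unsigned argc, char **argv)
{
	log->context = NULL;	/* per-log private state would go here */
	return 0;
}

static void example_dtr(struct dm_dirty_log *log)
{
}

static struct dm_dirty_log_type _example_type = {
	.name = "example",
	.module = THIS_MODULE,
	.ctr = example_ctr,
	.dtr = example_dtr,
	/* .presuspend, .resume, .get_region_size, ... (all required) */
};

static int __init example_log_init(void)
{
	return dm_dirty_log_type_register(&_example_type);
}

static void __exit example_log_exit(void)
{
	dm_dirty_log_type_unregister(&_example_type);
}

module_init(example_log_init);
module_exit(example_log_exit);
MODULE_LICENSE("GPL");

Consumers such as dm-raid1 never call ctr/dtr directly; they go through dm_dirty_log_create() and dm_dirty_log_destroy(), as the header comment above insists.
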
@@ -1,15 +1,20 @@
 /*
  * Copyright (C) 2003 Sistina Software
+ * Copyright (C) 2004 - 2008 Red Hat, Inc. All rights reserved.
+ *
+ * Device-Mapper low-level I/O.
  *
  * This file is released under the GPL.
  */
 
-#ifndef _DM_IO_H
-#define _DM_IO_H
+#ifndef _LINUX_DM_IO_H
+#define _LINUX_DM_IO_H
 
-#include "dm.h"
+#ifdef __KERNEL__
 
-struct io_region {
+#include <linux/types.h>
+
+struct dm_io_region {
 	struct block_device *bdev;
 	sector_t sector;
 	sector_t count;	/* If this is zero the region is ignored. */
@@ -74,6 +79,7 @@ void dm_io_client_destroy(struct dm_io_client *client);
 * error occurred doing io to the corresponding region.
 */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
-	  struct io_region *region, unsigned long *sync_error_bits);
+	  struct dm_io_region *region, unsigned long *sync_error_bits);
 
-#endif
+#endif	/* __KERNEL__ */
+#endif	/* _LINUX_DM_IO_H */

@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2001 - 2003 Sistina Software
+ * Copyright (C) 2004 - 2008 Red Hat, Inc. All rights reserved.
+ *
+ * kcopyd provides a simple interface for copying an area of one
+ * block-device to one or more other block-devices, either synchronous
+ * or with an asynchronous completion notification.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef _LINUX_DM_KCOPYD_H
+#define _LINUX_DM_KCOPYD_H
+
+#ifdef __KERNEL__
+
+#include <linux/dm-io.h>
+
+/* FIXME: make this configurable */
+#define DM_KCOPYD_MAX_REGIONS 8
+
+#define DM_KCOPYD_IGNORE_ERROR 1
+
+/*
+ * To use kcopyd you must first create a dm_kcopyd_client object.
+ */
+struct dm_kcopyd_client;
+int dm_kcopyd_client_create(unsigned num_pages,
+			    struct dm_kcopyd_client **result);
+void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc);
+
+/*
+ * Submit a copy job to kcopyd.  This is built on top of the
+ * previous three fns.
+ *
+ * read_err is a boolean,
+ * write_err is a bitset, with 1 bit for each destination region
+ */
+typedef void (*dm_kcopyd_notify_fn)(int read_err, unsigned long write_err,
+				    void *context);
+
+int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
+		   unsigned num_dests, struct dm_io_region *dests,
+		   unsigned flags, dm_kcopyd_notify_fn fn, void *context);
+
+#endif	/* __KERNEL__ */
+#endif	/* _LINUX_DM_KCOPYD_H */

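Putting the renamed interface together: a caller now allocates a dm_kcopyd_client and hands dm_io_region descriptors to dm_kcopyd_copy(), exactly as dm-snap's start_copy() does above. A rough sketch of that flow from a hypothetical caller; error paths are trimmed, and the function names, page count, and sector values are illustrative, not from this merge:

#include <linux/kernel.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>

/* Completion callback: read_err is a boolean, write_err is a bitset
 * with one bit per destination region */
static void copy_done(int read_err, unsigned long write_err, void *context)
{
	if (read_err || write_err)
		printk(KERN_ERR "example: copy failed\n");
}

static int copy_first_sectors(struct block_device *src_bdev,
			      struct block_device *dst_bdev)
{
	struct dm_kcopyd_client *kc;
	struct dm_io_region src, dest;
	int r;

	r = dm_kcopyd_client_create(256, &kc);	/* illustrative page count */
	if (r)
		return r;

	src.bdev = src_bdev;
	src.sector = 0;
	src.count = 128;		/* sectors to copy */

	dest.bdev = dst_bdev;
	dest.sector = 0;
	dest.count = src.count;

	/* One source, one destination, no flags; copy_done() fires on
	 * completion.  The client must outlive all submitted jobs, so
	 * dm_kcopyd_client_destroy(kc) belongs in the caller's teardown. */
	return dm_kcopyd_copy(kc, &src, 1, &dest, 0, copy_done, NULL);
}

Because each client carries its own page reservation and mempool after this merge ("dm kcopyd: private mempool", "dm kcopyd: per device"), one busy device can no longer starve another's copy jobs.
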